#!/usr/bin/env python3
# Copyright (c) 2020-2023, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import traceback

import numpy as np

# Level | Level for Humans | Level Description
# -------|------------------|------------------------------------
#  0     | DEBUG            | [Default] Print all messages
#  1     | INFO             | Filter out INFO messages
#  2     | WARNING          | Filter out INFO & WARNING messages
#  3     | ERROR            | Filter out all messages
# Filter tensorflow debug messages except errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
import tensorflow as tf  # noqa: E402
from frameworks.write_test_json import write_test_json  # noqa: E402
from frameworks.arg_gen import ArgGen  # noqa: E402
from frameworks.tensor_gen import TGen  # noqa: E402
from frameworks.test_builder import TBuilder  # noqa: E402
from frameworks.test_gen_utils import (  # noqa: E402
    QuantType,
    get_tf_dtype,
    get_shape_str,
)  # noqa: E402
from tensorflow.lite.python.interpreter import OpResolverType  # noqa: E402

# All of the supported frameworks
ALL_FRAMEWORKS = ["tf", "tflite"]

# Lists of different data types
TYPE_F = [tf.float32]
TYPE_I = [tf.int32]
TYPE_FI = [tf.float32, tf.int32]
TYPE_B = [tf.bool]
TYPE_FIB = [tf.float32, tf.int32, tf.bool]
TYPE_H = [tf.float16]
TYPE_FH = [tf.float32, tf.float16]
TYPE_FHI = [tf.float32, tf.float16, tf.int32]
TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]

# The list of operator tests
# Each dictionary entry for an op is a dictionary with the following required members:
#   'operands': tuple (number_of_placeholder_tensors, number_of_constant_tensors)
#   'build_fcn': tuple (Test builder function, Tensor generator function,
#                       Argument generator function)
#   'types': list of TensorFlow types that should be tested for this op
#            OR
#            a dictionary of {'framework_name': [type_list]} for cases where only
#            a subset of the types should be tested in each framework.  This can also
#            be used to restrict an operator to a particular framework.
#
# And optional members:
#   'template': boolean (indicates that this is a templated op which gets further
#               processing in createDynamicOpLists)
#   'bias': boolean indicating that there is a bias component to be generated
#   'qtypes': list of QuantType quantized types to generate for this op
#   'rank': tuple (lowest rank, highest rank). Dimension range of the input tensor.
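
# As an illustration of the schema above, the first entry below ("add") declares
# two placeholder inputs and no constants, is built with TBuilder.Add from fuzzed
# input tensors (TGen.tgBFuzz) with no extra arguments (ArgGen.agNone), and is
# tested with float32/int32 under TensorFlow plus the listed quantized types
# under TFLite.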

TF_OP_LIST = {
66 "add": {
67 "operands": (2, 0),
68 "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
69 "types": {
70 "tf": TYPE_FI,
71 "tflite": list(
72 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
73 ),
74 },
75 },
76 "sub": {
77 "operands": (2, 0),
78 "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
79 "types": {
80 "tf": TYPE_FI,
81 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # QuantType.ALL_I16 fails in TFLite conversion
83 },
84 },
85 "mul": {
86 "operands": (2, 0),
87 "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
88 "types": {
89 "tf": TYPE_FI,
90 "tflite": list(
91 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
92 ),
93 },
94 },
95 "exp": {
96 "operands": (1, 0),
97 "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
98 "types": TYPE_F,
99 },
100 "rcp": {
101 "operands": (1, 0),
102 "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
103 "types": TYPE_F,
104 },
105 "relu": {
106 "operands": (1, 0),
107 "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
108 "types": {
109 "tf": TYPE_F,
110 "tflite": list(
111 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
112 ),
113 },
114 },
    "relu1": {
116 "operands": (1, 0),
117 "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
118 "types": {
119 "tf": [],
120 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
121 },
122 },
    "relu6": {
124 "operands": (1, 0),
125 "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
126 "types": {
127 "tf": TYPE_F,
128 "tflite": list(
129 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
130 ),
131 },
132 },
133 "leaky_relu": {
134 "operands": (1, 0),
135 "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
136 "types": {
137 "tf": TYPE_F,
138 "tflite": list(
139 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
140 ),
141 },
142 },
    "prelu": {
144 "operands": (1, 0),
145 "build_fcn": (TBuilder.Prelu, TGen.tgBasic, ArgGen.agNone),
146 "types": {
147 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
148 },
149 },
    "gelu": {
151 "operands": (1, 0),
152 "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
153 "types": {
154 # Need compiler support for tf.Erf.
155 # "tf": TYPE_F,
156 "tflite": list(
157 # Only float32, int8 and uint8 supported currently
158 TYPE_F
159 + [QuantType.ALL_U8, QuantType.ALL_I8]
160 ),
161 },
162 },
    "concat": {
164 "operands": (2, 0),
165 "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
166 "types": TYPE_FI,
167 },
168 "bitwise_and": {
169 "operands": (2, 0),
170 "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
171 "types": {"tf": TYPE_I}, # Not supported in TF Lite
172 },
173 "bitwise_or": {
174 "operands": (2, 0),
175 "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
176 "types": {"tf": TYPE_I}, # Not supported in TF Lite
177 },
178 "bitwise_not": {
179 "operands": (1, 0),
180 "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
181 "types": {"tf": TYPE_I}, # Not supported in TF Lite
182 },
183 "bitwise_xor": {
184 "operands": (2, 0),
185 "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
186 "types": {"tf": TYPE_I}, # Not supported in TF Lite
187 },
188 "logical_and": {
189 "operands": (2, 0),
190 "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
191 "types": TYPE_B,
192 },
193 "logical_or": {
194 "operands": (2, 0),
195 "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
196 "types": TYPE_B,
197 },
198 "logical_not": {
199 "operands": (1, 0),
200 "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
201 "types": TYPE_B,
202 },
203 "reduce_any": {
204 "operands": (1, 0),
205 "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
206 "types": TYPE_B,
207 },
208 "reduce_all": {
209 "operands": (1, 0),
210 "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_B,
    },
213 "reduce_min": {
214 "operands": (1, 0),
215 "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
216 "types": {
217 "tf": TYPE_FI,
218 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
219 },
220 },
221 "reduce_max": {
222 "operands": (1, 0),
223 "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
224 "types": {
225 "tf": TYPE_FI,
226 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
227 },
228 },
229 "reduce_sum": {
230 "operands": (1, 0),
231 "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
232 "types": {
233 "tf": TYPE_F,
234 # v2 converter doesn't recognize quantized reduce_sum
235 # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
236 "tflite": TYPE_F,
237 },
238 },
239 "reduce_mean": {
240 "operands": (1, 0),
241 "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
242 "types": {
243 "tf": TYPE_F,
244 "tflite": list(
245 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
246 ),
247 },
248 },
249 "reduce_product": {
250 "operands": (1, 0),
251 "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
252 "types": TYPE_F,
253 },
254 "min": {
255 "operands": (2, 0),
256 "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
257 "types": TYPE_FI,
258 },
259 "max": {
260 "operands": (2, 0),
261 "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
262 "types": TYPE_FI,
263 },
264 "pow": {
265 "operands": (2, 0),
266 "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
267 # Technically, integer is supported, but only for positive exponents.
268 # Needs a random argument generator.
269 "types": TYPE_F,
270 },
271 "abs": {
272 "operands": (1, 0),
273 "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
274 "types": TYPE_F,
275 },
276 "ceil": {
277 "operands": (1, 0),
278 "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
279 "types": TYPE_F,
280 },
281 "floor": {
282 "operands": (1, 0),
283 "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
284 "types": TYPE_F,
285 },
286 "log": {
287 "operands": (1, 0),
288 "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
289 "types": TYPE_F,
290 },
291 "negate": {
292 "operands": (1, 0),
293 "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
294 "types": TYPE_F,
295 },
296 "rsqrt": {
297 "operands": (1, 0),
298 "build_fcn": (TBuilder.Rsqrt, TGen.tgBasic, ArgGen.agNone),
299 "types": TYPE_F,
300 },
    "sign": {
302 "operands": (1, 0),
303 "build_fcn": (TBuilder.Sign, TGen.tgBasic, ArgGen.agNone),
304 "types": {
305 "tf": TYPE_F,
306 },
307 },
    "sigmoid": {
309 "operands": (1, 0),
310 "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
311 "types": {
312 "tf": TYPE_F,
313 "tflite": list(
314 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
315 ),
316 },
317 },
318 "tanh": {
319 "operands": (1, 0),
320 "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
321 "types": {
322 "tf": TYPE_F,
323 "tflite": list(
324 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
325 ),
326 },
327 },
    "sin": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sin, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "cos": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Cos, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "atan2": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Atan2, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tflite": TYPE_F,
        },
    },
    "square": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
350 "squared_difference": {
351 "operands": (2, 0),
352 "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
353 "types": TYPE_F,
354 },
355 "equal": {
356 "operands": (2, 0),
357 "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
358 "types": TYPE_FI,
359 },
360 "greater_equal": {
361 "operands": (2, 0),
362 "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
363 "types": TYPE_FI,
364 },
365 "greater": {
366 "operands": (2, 0),
367 "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
368 "types": TYPE_FI,
369 },
370 "less": {
371 "operands": (2, 0),
372 "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
373 "types": TYPE_FI,
374 },
375 "less_equal": {
376 "operands": (2, 0),
377 "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
378 "types": TYPE_FI,
379 },
380 "conv2d_TEMPLATE": {
381 "operands": (1, 1),
382 "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
383 "types": {
384 "tf": [tf.float32],
385 "tflite": [
386 tf.float32,
387 QuantType.CONV_U8_U8,
388 QuantType.CONV_I8_I8,
389 QuantType.CONV_I16_I8,
390 ],
391 },
392 "template": True,
393 },
394 "conv2d_relu_TEMPLATE": {
395 "operands": (1, 2),
396 "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
397 "types": {
398 "tf": [tf.float32],
399 "tflite": [
400 tf.float32,
401 QuantType.CONV_U8_U8,
402 QuantType.CONV_I8_I8,
403 QuantType.CONV_I16_I8,
404 ],
405 },
406 "template": True,
407 },
408 "conv2d_relu6_TEMPLATE": {
409 "operands": (1, 2),
410 "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
411 "types": {
412 "tf": [tf.float32],
413 "tflite": [
414 tf.float32,
415 QuantType.CONV_U8_U8,
416 QuantType.CONV_I8_I8,
417 QuantType.CONV_I16_I8,
418 ],
419 },
420 "template": True,
421 },
422 "conv2d_relu_n1_to_1_TEMPLATE": {
423 "operands": (1, 2),
424 "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
425 "types": {
426 "tf": [tf.float32],
427 "tflite": [
428 tf.float32,
429 QuantType.CONV_U8_U8,
430 QuantType.CONV_I8_I8,
431 QuantType.CONV_I16_I8,
432 ],
433 },
434 "template": True,
435 },
436 # This test is converted as:
437 # tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
    # TODO: any way to generate tfl.conv2d(){fused_activation_function="TANH"}?
439 "conv2d_tanh_TEMPLATE": {
440 "operands": (1, 2),
441 "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
442 "types": {
443 "tf": [tf.float32],
444 "tflite": [
445 tf.float32,
446 QuantType.CONV_U8_U8,
447 QuantType.CONV_I8_I8,
448 QuantType.CONV_I16_I8,
449 ],
450 },
451 "template": True,
452 },
453 "conv2d_bias_TEMPLATE": {
454 "operands": (1, 2),
455 "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
456 "types": {
457 "tf": [tf.float32],
458 "tflite": [
459 tf.float32,
460 QuantType.CONV_U8_U8,
461 QuantType.CONV_I8_I8,
462 QuantType.CONV_I16_I8,
463 ],
464 },
465 "bias": True,
466 "template": True,
467 },
    "conv3d_TEMPLATE": {
469 "operands": (1, 1),
470 "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
471 "types": {
472 "tf": [tf.float32],
473 "tflite": [
474 tf.float32,
475 QuantType.CONV_U8_U8,
476 QuantType.CONV_I8_I8,
477 # Quantization to 16x8-bit not yet supported by tflite.
478 ],
479 },
480 "template": True,
481 "rank": (1, 5),
482 },
483 "conv3d_bias_TEMPLATE": {
484 "operands": (1, 2),
485 "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
486 "types": {
487 "tf": [tf.float32],
488 "tflite": [
489 tf.float32,
490 QuantType.CONV_U8_U8,
491 QuantType.CONV_I8_I8,
492 # Quantization to 16x8-bit not yet supported by tflite.
493 ],
494 },
495 "bias": True,
496 "template": True,
497 "rank": (1, 5),
498 },
    "depthwise_conv2d_TEMPLATE": {
500 "operands": (1, 1),
501 "build_fcn": (
502 TBuilder.DepthwiseConv2d,
503 TGen.tgDepthwiseConv2d,
504 ArgGen.agDepthwiseConv2d,
505 ),
506 "types": {
507 "tf": [tf.float32],
508 "tflite": [
509 tf.float32,
510 QuantType.CONV_U8_U8,
511 QuantType.CONV_I8_I8,
512 QuantType.CONV_I16_I8,
513 ],
514 },
515 "template": True,
516 },
517 "depthwise_conv2d_bias_TEMPLATE": {
518 "operands": (1, 2),
519 "build_fcn": (
520 TBuilder.DepthwiseConv2dWithBias,
521 TGen.tgDepthwiseConv2d,
522 ArgGen.agDepthwiseConv2d,
523 ),
524 "types": {
525 "tf": [tf.float32],
526 "tflite": [
527 tf.float32,
528 QuantType.CONV_U8_U8,
529 QuantType.CONV_I8_I8,
530 QuantType.CONV_I16_I8,
531 ],
532 },
533 "bias": True,
534 "template": True,
535 },
536 "transpose_conv2d_TEMPLATE": {
537 "operands": (1, 1),
538 "build_fcn": (
539 TBuilder.TransposeConv2d,
540 TGen.tgTransposeConv2d,
541 ArgGen.agTransposeConv2d,
542 ),
543 "types": {
544 "tf": [tf.float32],
545 "tflite": [
546 tf.float32,
547 QuantType.CONV_U8_U8,
548 QuantType.CONV_I8_I8,
549 QuantType.CONV_I16_I8,
550 ],
551 },
552 "template": True,
553 },
554 "argmax": {
555 "operands": (1, 0),
556 "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
557 "types": {"tf": TYPE_F},
558 },
559 "avg_pool2d": {
560 "operands": (1, 0),
561 "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
562 "types": {
563 "tf": TYPE_F,
564 "tflite": list(
565 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
566 ),
567 },
568 },
569 "max_pool2d": {
570 "operands": (1, 0),
571 "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
572 "types": {
573 "tf": TYPE_F,
574 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
575 # ALL_I16 not supported yet
576 # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
577 # QI16 is missing from MaxPoolOperandAndResultConstraints
578 # If adding QI16 back this test can run through.
579 },
580 },
581 "reshape": {
582 "operands": (1, 0),
583 "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
584 "types": TYPE_FI,
585 },
586 "transpose": {
587 "operands": (1, 0),
588 "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
589 "types": TYPE_FI,
590 },
591 "slice": {
592 "operands": (1, 0),
593 "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
594 "types": TYPE_FI,
595 },
596 "strided_slice": {
597 "operands": (1, 0),
598 "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
599 "types": TYPE_FI,
600 },
601 "select": {
602 "operands": (3, 0),
603 "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
604 "types": TYPE_FI,
605 },
606 "addn": {
607 "operands": (4, 0),
608 "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
609 "types": TYPE_FI,
610 },
611 "concatv2": {
612 "operands": (4, 0),
613 "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
614 "types": TYPE_FI,
615 },
616 "stack": {
617 "operands": (4, 0),
618 "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
619 "types": TYPE_FI,
620 },
621 "unstack": {
622 "operands": (1, 0),
623 "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
624 "types": TYPE_F,
625 },
    "mirrorpad": {
627 "operands": (1, 0),
628 "build_fcn": (TBuilder.MirrorPad, TGen.tgBasic, ArgGen.agMirrorPad),
629 "types": TYPE_FI,
630 },
    "pad": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
        "types": {
            "tf": TYPE_F,
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
639 "expand_dims": {
640 "operands": (1, 0),
641 "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
642 "types": TYPE_FI,
643 },
644 "shape": {
645 "operands": (1, 0),
646 "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
647 "types": TYPE_FI,
648 },
649 "rank": {
650 "operands": (1, 0),
651 "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
652 "types": TYPE_FI,
653 },
654 "fill": {
655 "operands": (1, 0),
656 "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
657 "types": TYPE_FI,
658 },
659 "elu": {
660 "operands": (1, 0),
661 "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
662 "types": TYPE_F,
663 },
664 "softmax": {
665 "operands": (1, 0),
666 "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
667 "types": {
668 "tf": TYPE_F,
669 "tflite": list(
670 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
671 ),
672 },
673 },
674 "log_softmax": {
675 "operands": (1, 0),
676 "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
677 "types": TYPE_F,
678 },
679 "matmul": {
680 "operands": (2, 0),
681 "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
682 "types": {
683 "tf": TYPE_F,
684 "tflite": list(
685 TYPE_F
686 + [QuantType.ALL_U8, QuantType.ALL_I8]
                # 16-bit matmul fails to convert
688 ),
689 },
690 },
691 "add_scalar": {
692 "operands": (1, 0),
693 "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
694 "types": TYPE_F,
695 },
696 "add_1d": {
697 "operands": (2, 0),
698 "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
699 "types": TYPE_F,
700 },
701 "split": {
702 "operands": (1, 0),
703 "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
704 "types": TYPE_FI,
705 },
706 "tile": {
707 "operands": (1, 0),
708 "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
709 "types": TYPE_FI,
710 },
711 "reverse": {
712 "operands": (1, 0),
713 "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
714 "types": {"tf": TYPE_FI},
715 },
716 "gather": {
717 "operands": (1, 0),
718 "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
719 "types": TYPE_FI,
720 },
721 "gather_nd": {
722 "operands": (1, 0),
723 "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
724 "types": TYPE_FI,
725 },
726 "scatter_nd": {
727 "operands": (1, 0),
728 "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
729 "types": TYPE_FI,
730 },
731 "space_to_batch": {
732 "operands": (1, 0),
733 "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
734 "types": TYPE_F,
735 },
736 "batch_to_space": {
737 "operands": (1, 0),
738 "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
739 "types": TYPE_F,
740 },
741 "space_to_depth": {
742 "operands": (1, 0),
743 "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
744 "types": TYPE_F,
745 },
746 "depth_to_space": {
747 "operands": (1, 0),
748 "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
749 "types": TYPE_F,
750 },
751 "one_hot": {
752 "operands": (3, 1),
753 "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
754 "types": TYPE_FI,
755 },
756 "fakequant": {
757 "operands": (1, 0),
758 "build_fcn": (
759 TBuilder.Fakequant,
760 TGen.tgBasic,
761 ArgGen.agFakequant,
762 ),
763 "types": {"tf": TYPE_F},
764 },
    "resize": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Resize, TGen.tgPooling, ArgGen.agResize),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "left_shift": {
776 "operands": (1, 0),
777 "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
778 "types": {"tf": [tf.int32]},
779 },
780 "right_shift": {
781 "operands": (1, 0),
782 "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
783 "types": {
784 "tf": [
785 tf.int32,
786 ]
787 },
788 },
    "while": {
790 "operands": (1, 0),
791 "build_fcn": (TBuilder.While, TGen.tgBasic, ArgGen.agNone),
792 "types": {
793 "tflite": list(TYPE_F),
794 },
795 },
796 "lstm": {
797 "operands": (1, 0),
798 "build_fcn": (TBuilder.LSTM, TGen.tgRecurrent, ArgGen.agNone),
799 "types": {
800 "tflite": [
801 tf.float32,
802 # tf.int32
803 ]
804 },
805 },
806 "gru": {
807 "operands": (1, 0),
808 "build_fcn": (TBuilder.GRU, TGen.tgRecurrent, ArgGen.agNone),
809 "types": {
810 "tflite": [
811 tf.float32,
812 # tf.int32
813 ]
814 },
815 },
816 "rnn": {
817 "operands": (1, 0),
818 "build_fcn": (TBuilder.RNN, TGen.tgRecurrent, ArgGen.agNone),
819 "types": {
820 "tflite": [
821 tf.float32,
822 ]
823 },
824 },
    "rfft2d": {
826 "operands": (1, 0),
827 "build_fcn": (TBuilder.RFFT2d, TGen.tgRFFT2d, ArgGen.agRFFT2d),
828 "types": {
829 "tflite": TYPE_F,
830 },
831 },
}
833
# Shapes to be tested; default can be overwritten
shape_list = [
836 (1,),
837 (64,),
838 (14, 19),
839 (13, 21, 3),
    (1, 8, 16),
Jeremy Johnson015c3552022-02-23 12:15:03 +0000841 (1, 4, 4, 4),
842 (1, 8, 4, 17),
843 (1, 4, 8, 19),
844 (1, 32, 32, 8),
845 (1, 7, 7, 9),
    (1, 7, 7, 479),
    (3, 1, 1, 7),
    (2, 2, 7, 7, 2),
    (1, 4, 8, 21, 17),
    (3, 32, 16, 16, 5),
]
852
853
def gen_rand_shapes(args):
855 """Overwrite the global shape list with a new list of random shapes"""
856 global shape_list
857
858 rng = np.random.default_rng(args.random_seed)
859
860 # Don't let things get too big... cap the maximum volume, but let
861 # an individual dimension be 1..47
862 max_total_volume = 32 * 32 * 4
863
864 shape_list = []
    # Only iterate over ranks 2, 3, 4, and 5
    for rank in range(2, 6):
        for n in range(args.random_shapes):
            new_shape = rng.integers(1, 48, size=rank)

            # Set the batch dimension on 4D or 5D objects to 1
            if rank == 4 or rank == 5:
                new_shape[0] = 1
873
874 # Limit the total shape volume and throw out any
875 # shapes that wouldn't leave at least size=2 in some non-batch dimension
876 volume = 1
877 skip_shape = False
878 for i in range(rank):
879
880 volume *= new_shape[i]
881
882 # Reduce the shape, while it's larger than the maximum volume
883 while volume > max_total_volume:
884 new_shape[i] = new_shape[i] // 2
885 volume = volume // 2
886
887 # Now an untenable dimension size? Skip this one.
888 if new_shape[i] < 1:
889 skip_shape = True
890
891 if not skip_shape:
892 shape_list.append(tuple(new_shape))
893
894
# Construct, run, and save a whole TensorFlow tf.function to a protobuf file,
# or convert it to a .tflite flatbuffer if it is a quantized unit test
def run_unit_test(
898 op_name,
899 args,
900 test_dir,
901 curr_shape,
902 addl_args,
903 dtype,
904 excluded_framework_list,
905 quantized_inference_dtype,
906 result_name,
907 seed,
908):
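    """Construct and run a single unit test for op_name with the given shape,
    dtype and additional arguments, writing the model file(s), input/result
    .npy files and the test.json descriptor into test_dir.

    Returns True on success and False if any step raised an exception."""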
909
910 try:
911 op = TF_OP_LIST[op_name]
912 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
913
914 # Get and seed a random number generator for this test
915 rng = np.random.default_rng(seed)
916
917 # return placeholders=(str: name, np.array: value)
918 # consts=(str: name, np.array: value)
919 placeholders, consts = tensor_gen_fcn(op, curr_shape, dtype, rng)
920
        # If the test doesn't have any placeholders or consts, terminate early
922 if len(placeholders) == 0 and len(consts) == 0:
923 return True
924
925 if not args.quiet:
926 print(" {} ".format(test_dir))
927
928 try:
929 os.mkdir(test_dir)
930 except FileExistsError:
931 pass
932
933 const_nodes = [value for name, value in consts]
934
935 num_placeholders = len(placeholders)
936 # if test is quantized, create tensor quantization metadata info for
937 # each input tensor, based on different quantized type
938 if quantized_inference_dtype:
939 is_quantized = True
940 # TODO: support INT8 IFM x INT4 weight later
941 if quantized_inference_dtype == QuantType.ALL_U8:
942 qzero = [128] * num_placeholders
943 numpy_dtype = [np.uint8] * num_placeholders
944 tflite_inference_dtype = tf.uint8
945 elif quantized_inference_dtype == QuantType.ALL_I8:
946 qzero = [0] * num_placeholders
947 numpy_dtype = [np.int8] * num_placeholders
948 tflite_inference_dtype = tf.int8
949 elif quantized_inference_dtype == QuantType.ALL_I16:
950 qzero = [0] * num_placeholders
951 numpy_dtype = [np.int16] * num_placeholders
952 tflite_inference_dtype = tf.int16
953 elif quantized_inference_dtype == QuantType.CONV_U8_U8:
954 assert (
955 num_placeholders == 1
956 ), "Unsupported number of placeholders for Convolution: {}".format(
957 num_placeholders
958 )
959 qzero = [128] * num_placeholders
960 if num_placeholders == 2:
961 numpy_dtype = [np.uint8, np.uint8]
962 else:
963 numpy_dtype = [np.uint8, np.uint8, np.int32]
964 tflite_inference_dtype = tf.uint8
965 elif quantized_inference_dtype == QuantType.CONV_I8_I8:
966 assert (
967 num_placeholders == 1
968 ), "Unsupported number of placeholders for Convolution: {}".format(
969 num_placeholders
970 )
971 qzero = [0] * num_placeholders
972 if num_placeholders == 2:
973 numpy_dtype = [np.int8, np.int8]
974 else:
975 numpy_dtype = [np.int8, np.int8, np.int32]
976 tflite_inference_dtype = tf.int8
977 elif quantized_inference_dtype == QuantType.CONV_I16_I8:
978 assert (
979 num_placeholders == 1
980 ), "Unsupported number of placeholders for Convolution: {}".format(
981 num_placeholders
982 )
983 if num_placeholders == 2:
984 qzero = [0, 0]
985 numpy_dtype = [np.int16, np.int8]
986 else:
987 qzero = [0, 0, 0]
988 numpy_dtype = [
989 np.int16,
990 np.int8,
991 np.int64,
992 ] # np.int64 to represent 40 bits accumulator
993 tflite_inference_dtype = tf.int16
994 else:
995 raise Exception(
996 "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
997 )
998
999 else:
1000 is_quantized = False
1001
1002 tf_model_filename = None
1003 tf_result_npy_filename = None
1004 tf_result_name = None
1005
1006 tflite_model_filename = None
1007 tflite_result_npy_filename = None
1008 tflite_result_name = None
1009
1010 placeholder_names = []
1011 placeholder_vals = []
1012 placeholder_signatures = ()
1013 placeholder_npy_filenames = []
1014 placeholder_shapes = []
1015
1016 for idx, (name, val) in enumerate(placeholders):
1017 placeholder_names.append(name)
1018 placeholder_signatures = placeholder_signatures + (
1019 tf.TensorSpec(shape=val.shape, dtype=val.dtype, name=name),
1020 )
1021 placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
1022 placeholder_shapes.append(val.shape)
1023
1024 # Get test builder class
1025 fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
1026 concrete_function = tf.function(input_signature=placeholder_signatures)(
1027 fcn_node.eval
1028 ).get_concrete_function()
1029
1030 if is_quantized:
1031
1032 assert dtype is tf.float32, "quantized test must come from float32 graph"
1033
1034 # 1. Quantize float placeholder npy to quantized to feed the graph
1035 for idx, (name, val) in enumerate(placeholders):
1036
1037 # we use np.amin()/np.amax() to determine dynamic range
1038 # for quantized test
1039 zeropoint = 0
1040 scale = 1.0
1041 if numpy_dtype[idx] != np.int64:
1042 qmin = np.iinfo(numpy_dtype[idx]).min
1043 qmax = np.iinfo(numpy_dtype[idx]).max
1044 num_bits = np.iinfo(numpy_dtype[idx]).bits
1045 # 40 bit is represented as np.int64
1046 else:
1047 num_bits = 40
1048 qmin = -(1 << num_bits)
1049 qmax = (1 << num_bits) - 1
1050
1051 min_val = np.amin(val)
1052 max_val = np.amax(val)
1053
1054 # for single value tensor, we set scale equal to the abs(value),
1055 # and fix zeropoint to 128
1056 # if val > 0, it'll be represented as 129,
1057 # where val = (129 - 128) * val
1058 # if val < 0, it'll be represented as 127,
1059 # where val = (127 - 128) * (-val)
1060 # if val == 0, it'll be represted as 128, with range [-128.0, 128.0]
1061 # and let quantized 1 represent the value
1062 # also adjust effective min/max consequently
1063 if max_val == min_val:
1064 if max_val != 0:
1065 scale = abs(max_val)
1066 else:
1067 scale = 1.0
1068 min_val = float(qmin - qzero[idx]) * scale
1069 max_val = float(qmax - qzero[idx]) * scale
1070 else:
1071 scale = (max_val - min_val) / float(qmax - qmin)
1072 zeropoint = int(round((-min_val) / scale)) + qmin
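                # Illustrative example (int8, hypothetical values): for data in
                # [-1.0, 3.0], scale = (3.0 - (-1.0)) / 255 ~= 0.0157 and
                # zeropoint = round(1.0 / 0.0157) + (-128) = -64, so -1.0 maps
                # to -128 and 0.0 maps to -64.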
1073
                # Run through tf.fakequant first to ensure the quantization error is aligned
1075 fakequant_val = tf.quantization.fake_quant_with_min_max_args(
1076 val,
1077 min=min_val,
1078 max=max_val,
1079 num_bits=num_bits,
1080 name="gen_quant_npy",
1081 )
1082
1083 quant_val = np.round(fakequant_val / scale).astype(np.int32) + zeropoint
1084
                # In a few unit tests (after the TF commit from May 2020), this
                # quantized value exceeds the [0, 255] range for some reason
1087 saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])
1088
                # Save all quantized tensors as np.int32,
                # since the TOSA numpy C++ API only supports int32
1091 np.save(
1092 os.path.join(test_dir, placeholder_npy_filenames[idx]),
1093 saved_val.astype(np.int32),
1094 False,
1095 )
1096
1097 placeholder_vals.append(tf.convert_to_tensor(saved_val))
1098
1099 # 2. Convert the model to quantized TFLite flatbuffer
1100 module = tf.Module()
1101 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1102 [concrete_function], module
1103 )
1104 converter.optimizations = [tf.lite.Optimize.DEFAULT]
1105 converter.experimental_new_converter = True
1106
1107 # use MLIR-based post-quantizer
1108 converter.experimental_new_quantizer = True
1109
1110 flag = (
1111 tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 # noqa: E501
1112 )
1113 if tflite_inference_dtype == tf.int16:
1114 converter.target_spec.supported_ops = [flag]
1115
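            # Representative dataset generator for post-training quantization:
            # yields args.num_samples sets of random float32 inputs matching the
            # placeholder shapes, used by the converter to calibrate ranges.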
1116 def input_stats():
1117 for i in range(0, args.num_samples):
1118 a = [
1119 TGen.getRand(shape, tf.float32, rng)
1120 for shape in placeholder_shapes
1121 ]
1122 yield a
1123
1124 converter.representative_dataset = input_stats
1125 converter.inference_input_type = tflite_inference_dtype
1126 converter.inference_output_type = tflite_inference_dtype
1127
1128 tflite_model = converter.convert()
1129
1130 tflite_model_filename = "model.tflite"
1131
1132 # Write out converted model to disk
1133 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1134 f.write(tflite_model)
1135
1136 else: # is_quantized is False
1137
            # 1. Save out the numpy arrays directly
1139 for idx, (name, val) in enumerate(placeholders):
1140 placeholder_vals.append(tf.convert_to_tensor(val))
1141 np.save(
1142 os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
1143 )
1144
            # 2.a Save out a .pb if the framework list includes tensorflow
1146 if "tf" not in excluded_framework_list:
1147 # Write out graph as protobuf to disk
1148 tf_model_filename = "model.pb"
1149 tf.io.write_graph(
1150 concrete_function.graph, test_dir, tf_model_filename, True
1151 )
1152
            # 2.b Save out a .tflite if the framework list includes tflite
1154 if "tflite" not in excluded_framework_list:
1155 # Convert the model to TFLite flatbuffer
1156 module = tf.Module()
1157 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1158 [concrete_function], module
1159 )
1160
1161 converter.experimental_new_converter = True
1162
                # Even if it's a non-quantized int32 test, this needs to be set to tf.float32
1164 converter.inference_input_type = tf.float32
1165 converter.inference_output_type = tf.float32
1166 tflite_model = converter.convert()
1167
1168 # Write out converted model to disk
1169 tflite_model_filename = "model.tflite"
1170 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1171 f.write(tflite_model)
1172
1173 # Get TF reference result if .pb is specified
1174 if tf_model_filename:
1175 tf_result_npy_filename = "tf_result.npy"
1176 tf_result = concrete_function(*placeholder_vals)
1177 np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)
1178
1179 tf_result_name = result_name
1180
1181 # Get TFLite inference result if .tflite is specified
1182 if tflite_model_filename:
1183 tflite_result_npy_filename = "tflite_result.npy"
1184
            ops_with_optimized_only_kernel = ["elu", "ceil", "gather", "rfft2d"]

1187 if args.tflite_kernel_mode == "optimized" or (
1188 op_name in ops_with_optimized_only_kernel
1189 ):
1190 interpreter = tf.lite.Interpreter(
1191 model_path=os.path.join(test_dir, tflite_model_filename)
1192 )
1193 elif args.tflite_kernel_mode == "reference":
1194 interpreter = tf.lite.Interpreter(
1195 model_path=os.path.join(test_dir, tflite_model_filename),
1196 experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
1197 )
1198 else:
1199 assert 0, "unknown tflite interpreter mode {}".format(
1200 args.tflite_kernel_mode
1201 )
1202 interpreter.allocate_tensors()
1203
1204 input_details = interpreter.get_input_details()
1205 output_details = interpreter.get_output_details()
1206
1207 assert len(input_details) == len(
1208 placeholder_vals
1209 ), "number of placeholder mismatch"
1210
1211 for idx, val in enumerate(placeholder_vals):
1212 interpreter.set_tensor(input_details[idx]["index"], val.numpy())
1213
1214 interpreter.invoke()
1215 tflite_result = interpreter.get_tensor(output_details[0]["index"])
1216
1217 np.save(
1218 os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
1219 )
1220
1221 # Result tensor name would change after converting to TFLite flatbuffer
1222 # Overwrite the information from TFLite models directly.
1223 # Assume single result tensor now
1224 tflite_result_name = output_details[0]["name"]
1225
1226 # Write out test descriptor
1227 write_test_json(
1228 filename=os.path.join(test_dir, "test.json"),
1229 tf_model_filename=tf_model_filename,
1230 tf_result_npy_filename=tf_result_npy_filename,
1231 tf_result_name=tf_result_name,
1232 tflite_model_filename=tflite_model_filename,
1233 tflite_result_npy_filename=tflite_result_npy_filename,
1234 tflite_result_name=tflite_result_name,
1235 ifm_name=placeholder_names,
1236 ifm_file=placeholder_npy_filenames,
1237 ifm_shape=placeholder_shapes,
1238 framework_exclusions=excluded_framework_list,
1239 quantized=is_quantized,
1240 )
1241 except Exception as e:
1242 msg = "Error running task: {}".format(e)
1243 print(msg)
1244 print(
1245 "".join(
1246 traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
1247 )
1248 )
1249 return False
1250 return True
1251
1252
def build_const_net(
1254 args,
1255 curr_shape,
1256 op_name,
1257 dtype,
1258 excluded_framework_list,
1259 quantized_inference_dtype,
1260 result_name,
1261 seed,
1262 rng,
1263 filter,
1264 unit_test_args,
1265):
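    """Build the test directory name for one test case, apply the op's rank
    constraint, and append one run_unit_test() argument list per generated
    argument combination to unit_test_args."""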
1266
1267 if quantized_inference_dtype:
1268 quant_dtype = get_tf_dtype(quantized_inference_dtype)
1269 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
1270 else:
1271 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
1272 test_dir = os.path.join(args.output_dir, test_dir)
1273
1274 # If the operator has an additional function to generate arguments, call it
1275 # here and iterate through the argument list that it generates
1276 op = TF_OP_LIST[op_name]
1277 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
1278
    try:
        rank_lo, rank_hi = op["rank"]
    except KeyError:
        # Set the testing rank to (1, 4) by default.
        rank_lo = 1
        rank_hi = 4

    if len(curr_shape) not in range(rank_lo, rank_hi + 1):
        return

    addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
1290 for desc, addl_args in addl_args_tuple:
        # Only filter on the full test_name, not the output directory
        _, test_name = os.path.split(test_dir + desc)
        if not filter or filter.search(test_name):
            unit_test_args.append(
1295 [
1296 op_name,
1297 args,
1298 test_dir + desc,
1299 curr_shape,
1300 addl_args,
1301 dtype,
1302 excluded_framework_list,
1303 quantized_inference_dtype,
1304 result_name,
1305 seed,
1306 ]
1307 )
1308
1309
# Python's built-in hash() is not reproducible across runs, so create a
# deterministic hash for our purposes
def op_name_hash(op_name):
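    """Deterministic hash of the operator name (a simple shift/xor scheme),
    used to derive a stable per-operator RNG seed."""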
1312 result = 0xDEADBEEF
1313 for ch in op_name:
1314 if result & 1:
1315 result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
1316 else:
1317 result = (ord(ch) << 24) ^ (result >> 1)
1318
1319 return result
1320
1321
def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):
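    """Expand one operator entry from TF_OP_LIST into concrete unit-test argument
    lists in unit_test_args, covering each shape, non-quantized dtype and (for
    TFLite) quantized dtype that the op declares."""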
1323
1324 if not args.quiet:
1325 print(
1326 "Generating tests for {} ".format(
1327 op_name
1328 )
1329 )
1330
1331 op = TF_OP_LIST[op_name]
1332
1333 # Seed the RNG so that we get the same random tests for each test each time
1334 # If the number of tests for a given generation function changes, the tests
1335 # for that operator may also change accordingly, but this will at least keep
1336 # down churn across operators.
1337
1338 bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
1339 np.int32
1340 ).max
1341 rng = np.random.default_rng(bounded_hash_val)
1342
    # This is a dictionary with 'tf' and 'tflite' as keys,
    # whose values are the data types we want to test under each framework
1345
1346 if isinstance(op["types"], dict):
1347 try:
1348 tf_dtypes = op["types"]["tf"]
1349 except KeyError:
1350 tf_dtypes = []
1351 try:
1352 tflite_dtypes = op["types"]["tflite"]
1353 except KeyError:
1354 tflite_dtypes = []
1355 elif isinstance(op["types"], list):
1356 tf_dtypes = op["types"]
1357 tflite_dtypes = op["types"]
1358
1359 tf_nonquantized_dtypes = tf_dtypes # tf doesn't support quantized data types
1360 tflite_quantized_dtypes = []
1361 tflite_nonquantized_dtypes = []
1362 for dtype in tflite_dtypes:
1363 if isinstance(dtype, QuantType):
1364 tflite_quantized_dtypes.append(dtype)
1365 else:
1366 tflite_nonquantized_dtypes.append(dtype)
1367
1368 nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
1369 set(tflite_nonquantized_dtypes)
1370 )
1371 nonquantized_dtypes = list(nonquantized_dtypes_set)
1372 quantized_dtypes = tflite_quantized_dtypes
1373
1374 # populate non quantized unit test arguments
1375 for dtype in nonquantized_dtypes:
1376
1377 excluded_framework_set = set(ALL_FRAMEWORKS)
1378 if dtype in tf_nonquantized_dtypes:
1379 excluded_framework_set.remove("tf")
1380 if dtype in tflite_nonquantized_dtypes:
1381 excluded_framework_set.remove("tflite")
1382 excluded_framework_list = list(excluded_framework_set)
1383
1384 for curr_shape in shape_list:
1385 build_const_net(
1386 args,
1387 curr_shape,
1388 op_name,
1389 dtype,
1390 excluded_framework_list,
1391 None,
1392 result_name,
1393 bounded_hash_val,
1394 rng,
1395 filter,
1396 unit_test_args,
1397 )
1398
1399 # populate quantized unit test arguments
1400 # must exclude 'tf' and source dtype being tf.float32
1401 for dtype in quantized_dtypes:
1402 for curr_shape in shape_list:
1403 build_const_net(
1404 args,
1405 curr_shape,
1406 op_name,
1407 tf.float32,
1408 ["tf"],
1409 dtype,
1410 result_name,
1411 bounded_hash_val,
1412 rng,
1413 filter,
1414 unit_test_args,
1415 )
1416
1417 return unit_test_args
1418
1419
def createDynamicOpLists():
1421 """The templated operators are conv2d-style operators with a number of kernel
1422 sizes. Since the operator is unchanged, we generate the range of kernel
1423 sizes here in this loop and remove the original templates from the list.
1424
1425 This could be expanded to non-conv2d-style operators in the future."""
1426
1427 # Dynamically create op lists for convolutions with a list of kernel sizes
1428 KERNELS = [
1429 [1, 1],
1430 [3, 3],
1431 [5, 5],
1432 ]
1433
    # dim = [D, H, W]
1435 KERNELS_3D = [
1436 [1, 1, 1],
1437 [2, 3, 3],
1438 [3, 5, 5],
1439 ]
1440
    TEMPLATE_LIST = [
1442 "conv2d",
1443 "conv2d_bias",
1444 "conv2d_relu",
1445 "conv2d_relu6",
1446 "conv2d_relu_n1_to_1",
1447 "conv2d_tanh",
1448 "depthwise_conv2d",
1449 "depthwise_conv2d_bias",
1450 "transpose_conv2d",
1451 ]
1452
    TEMPLATE_LIST_CONV3D = [
1454 "conv3d",
1455 "conv3d_bias",
1456 ]
1457
    for t in TEMPLATE_LIST:
1459 for k in KERNELS:
1460 testName = "{}_{}x{}".format(t, k[0], k[1])
1461 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1462 TF_OP_LIST[testName]["filter"] = k
1463 TF_OP_LIST[testName]["template"] = False
1464
    # The existing operators don't support kernel dimensions higher than 2.
1466 for t in TEMPLATE_LIST_CONV3D:
1467 for k in KERNELS_3D:
1468 testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
1469 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1470 TF_OP_LIST[testName]["filter"] = k
1471 TF_OP_LIST[testName]["template"] = False
1472
    # Delete any templates after having created any dynamic ops
1474 # This is a two-pass operation because it's bad practice to delete
1475 # keys from dictionaries while iterating
1476 keyList = []
1477 for k in TF_OP_LIST:
1478 try:
1479 if TF_OP_LIST[k]["template"]:
1480 keyList.append(k)
1481 continue
1482 except KeyError:
1483 pass
1484
1485 for k in keyList:
1486 del TF_OP_LIST[k]
1487
1488
def main():
1490 parser = argparse.ArgumentParser()
1491 parser.add_argument(
1492 "--seed", dest="random_seed", default=42, type=int, help="Random seed"
1493 )
1494 parser.add_argument(
1495 "--random-shapes",
1496 dest="random_shapes",
1497 default=0,
1498 type=int,
1499 help=(
1500 "Use N random shapes of each rank for generating tests,"
1501 "seeded with random seed"
1502 ),
1503 )
1504 parser.add_argument(
1505 "-o",
1506 "--output-dir",
1507 dest="output_dir",
1508 default=".",
1509 type=str,
1510 help="Test output directory path prefix",
1511 )
1512 parser.add_argument(
1513 "-q",
1514 "--quiet",
1515 dest="quiet",
1516 default=False,
1517 action="store_true",
1518 help="Do not print test names",
1519 )
1520 parser.add_argument(
1521 "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
1522 )
1523 parser.add_argument(
1524 "-m",
1525 "--tflite-kernel-mode",
1526 dest="tflite_kernel_mode",
1527 type=str,
1528 choices=["reference", "optimized"],
1529 default="reference",
1530 help="TFLite interpreter kernel mode",
1531 )
1532 parser.add_argument(
1533 "--num-samples",
1534 dest="num_samples",
1535 default=200,
1536 type=int,
1537 help="Number of input samples for post-training quantization",
1538 )
1539 parser.add_argument(
1540 "--filter",
1541 dest="filter",
1542 default="",
1543 type=str,
1544 help="Filter test names by this expression",
1545 )
1546 args = parser.parse_args()
1547
1548 # Turn the filter into a re object if present
1549 filter = None
1550 if args.filter != "":
1551 filter = re.compile(args.filter)
1552
1553 # Autodetect CPU count
1554 if args.jobs <= 0:
1555 args.jobs = os.cpu_count()
1556
1557 # Disable TF info messages
1558 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
1559
1560 try:
1561 os.makedirs(args.output_dir)
1562 except FileExistsError:
1563 pass
1564
1565 if args.random_shapes:
1566 gen_rand_shapes(args)
1567
1568 # Build dynamic ops
1569 createDynamicOpLists()
1570
1571 # Generate the test list and arguments to run_unit_test()
1572 unit_test_args = []
1573
1574 for op in TF_OP_LIST:
1575 generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)
1576
1577 errors = 0
1578 for t in unit_test_args:
1579 if not run_unit_test(*t):
1580 errors = errors + 1
1581
1582 if not args.quiet:
1583 print("\nAll tasks done - with {} errors".format(errors))
1584
1585 return 1 if errors else 0
1586
1587
if __name__ == "__main__":
1589 exit(main())