#!/usr/bin/env python3
# Copyright (c) 2020-2022, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import traceback

import numpy as np

# Level | Level for Humans | Level Description
# ------|------------------|------------------------------------
# 0     | DEBUG            | [Default] Print all messages
# 1     | INFO             | Filter out INFO messages
# 2     | WARNING          | Filter out INFO & WARNING messages
# 3     | ERROR            | Filter out all messages
# Filter tensorflow debug messages except errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
import tensorflow as tf  # noqa: E402
from frameworks.write_test_json import write_test_json  # noqa: E402
from frameworks.arg_gen import ArgGen  # noqa: E402
from frameworks.tensor_gen import TGen  # noqa: E402
from frameworks.test_builder import TBuilder  # noqa: E402
from frameworks.test_gen_utils import (  # noqa: E402
    QuantType,
    get_tf_dtype,
    get_shape_str,
)  # noqa: E402
from tensorflow.lite.python.interpreter import OpResolverType  # noqa: E402

# All of the supported frameworks
ALL_FRAMEWORKS = ["tf", "tflite"]

# Lists of different data types
TYPE_F = [tf.float32]
TYPE_I = [tf.int32]
TYPE_FI = [tf.float32, tf.int32]
TYPE_B = [tf.bool]
TYPE_FIB = [tf.float32, tf.int32, tf.bool]
TYPE_H = [tf.float16]
TYPE_FH = [tf.float32, tf.float16]
TYPE_FHI = [tf.float32, tf.float16, tf.int32]
TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]

# The list of operator tests
# Each dictionary entry for an op is a dictionary with the following required members:
#   'operands': tuple (number_of_placeholder_tensors, number_of_constant_tensors)
#   'build_fcn': tuple (Test builder function, Tensor generator function,
#                       Argument generator function)
#   'types': list of Tensorflow types that should be tested for this op
#               OR
#            a dictionary of {'framework_name': [type_list]} for cases where only
#            a subset of the types should be tested in each framework.  This can also
#            be used to restrict an operator to a particular framework.
#
# And optional members:
#   'template': boolean (indicates that this is a templated op which gets further
#               processing in createDynamicOpLists)
#   'bias': boolean indicating that there is a bias component to be generated
#   'qtypes': List of QuantType quantized types to generate for this op
#   'rank': tuple (lowest rank, highest rank). Dimension range of input tensor.

TF_OP_LIST = {
    "add": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "sub": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # QuantType.ALL_I16 fails in TFLite conversion
        },
    },
    "mul": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "exp": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "rcp": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "relu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "relu1": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": [],
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "relu6": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "leaky_relu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "prelu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Prelu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "gelu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            # Need compiler support for tf.Erf.
            # "tf": TYPE_F,
            "tflite": list(
                # Only float32, int8 and uint8 supported currently
                TYPE_F
                + [QuantType.ALL_U8, QuantType.ALL_I8]
            ),
        },
    },
    "concat": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
        "types": TYPE_FI,
    },
    "bitwise_and": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_or": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_not": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_xor": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "logical_and": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "logical_or": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "logical_not": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "reduce_any": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_B,
    },
    "reduce_all": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {"tf": TYPE_B},
    },
    "reduce_min": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "reduce_max": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "reduce_sum": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_F,
            # v2 converter doesn't recognize quantized reduce_sum
            # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
            "tflite": TYPE_F,
        },
    },
    "reduce_mean": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "reduce_product": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_F,
    },
    "min": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "max": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "pow": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
        # Technically, integer is supported, but only for positive exponents.
        # Needs a random argument generator.
        "types": TYPE_F,
    },
271 "abs": {
272 "operands": (1, 0),
273 "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
274 "types": TYPE_F,
275 },
276 "ceil": {
277 "operands": (1, 0),
278 "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
279 "types": TYPE_F,
280 },
281 "floor": {
282 "operands": (1, 0),
283 "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
284 "types": TYPE_F,
285 },
286 "log": {
287 "operands": (1, 0),
288 "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
289 "types": TYPE_F,
290 },
291 "negate": {
292 "operands": (1, 0),
293 "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
294 "types": TYPE_F,
295 },
296 "rsqrt": {
297 "operands": (1, 0),
298 "build_fcn": (TBuilder.Rsqrt, TGen.tgBasic, ArgGen.agNone),
299 "types": TYPE_F,
300 },
301 "sigmoid": {
302 "operands": (1, 0),
303 "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
304 "types": {
305 "tf": TYPE_F,
306 "tflite": list(
307 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
308 ),
309 },
310 },
311 "tanh": {
312 "operands": (1, 0),
313 "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
314 "types": {
315 "tf": TYPE_F,
316 "tflite": list(
317 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
318 ),
319 },
320 },
Luke Hutton41601862022-12-06 17:29:15 +0000321 "sin": {
322 "operands": (1, 0),
323 "build_fcn": (TBuilder.Sin, TGen.tgBasic, ArgGen.agNone),
324 "types": {
325 "tflite": TYPE_F,
326 },
327 },
328 "cos": {
329 "operands": (1, 0),
330 "build_fcn": (TBuilder.Cos, TGen.tgBasic, ArgGen.agNone),
331 "types": {
332 "tflite": TYPE_F,
333 },
334 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000335 "square": {
336 "operands": (1, 0),
337 "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
338 "types": TYPE_F,
339 },
340 "squared_difference": {
341 "operands": (2, 0),
342 "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
343 "types": TYPE_F,
344 },
345 "equal": {
346 "operands": (2, 0),
347 "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
348 "types": TYPE_FI,
349 },
350 "greater_equal": {
351 "operands": (2, 0),
352 "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
353 "types": TYPE_FI,
354 },
355 "greater": {
356 "operands": (2, 0),
357 "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
358 "types": TYPE_FI,
359 },
360 "less": {
361 "operands": (2, 0),
362 "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
363 "types": TYPE_FI,
364 },
365 "less_equal": {
366 "operands": (2, 0),
367 "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
368 "types": TYPE_FI,
369 },
370 "conv2d_TEMPLATE": {
371 "operands": (1, 1),
372 "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
373 "types": {
374 "tf": [tf.float32],
375 "tflite": [
376 tf.float32,
377 QuantType.CONV_U8_U8,
378 QuantType.CONV_I8_I8,
379 QuantType.CONV_I16_I8,
380 ],
381 },
382 "template": True,
383 },
384 "conv2d_relu_TEMPLATE": {
385 "operands": (1, 2),
386 "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
387 "types": {
388 "tf": [tf.float32],
389 "tflite": [
390 tf.float32,
391 QuantType.CONV_U8_U8,
392 QuantType.CONV_I8_I8,
393 QuantType.CONV_I16_I8,
394 ],
395 },
396 "template": True,
397 },
398 "conv2d_relu6_TEMPLATE": {
399 "operands": (1, 2),
400 "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
401 "types": {
402 "tf": [tf.float32],
403 "tflite": [
404 tf.float32,
405 QuantType.CONV_U8_U8,
406 QuantType.CONV_I8_I8,
407 QuantType.CONV_I16_I8,
408 ],
409 },
410 "template": True,
411 },
412 "conv2d_relu_n1_to_1_TEMPLATE": {
413 "operands": (1, 2),
414 "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
415 "types": {
416 "tf": [tf.float32],
417 "tflite": [
418 tf.float32,
419 QuantType.CONV_U8_U8,
420 QuantType.CONV_I8_I8,
421 QuantType.CONV_I16_I8,
422 ],
423 },
424 "template": True,
425 },
426 # This test is converted as:
427 # tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
428 # TODO: anyway to generate tfl.conv2d(){fused_activation_function="TANH"}?
429 "conv2d_tanh_TEMPLATE": {
430 "operands": (1, 2),
431 "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
432 "types": {
433 "tf": [tf.float32],
434 "tflite": [
435 tf.float32,
436 QuantType.CONV_U8_U8,
437 QuantType.CONV_I8_I8,
438 QuantType.CONV_I16_I8,
439 ],
440 },
441 "template": True,
442 },
443 "conv2d_bias_TEMPLATE": {
444 "operands": (1, 2),
445 "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
446 "types": {
447 "tf": [tf.float32],
448 "tflite": [
449 tf.float32,
450 QuantType.CONV_U8_U8,
451 QuantType.CONV_I8_I8,
452 QuantType.CONV_I16_I8,
453 ],
454 },
455 "bias": True,
456 "template": True,
457 },
    "conv3d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                # Quantization to 16x8-bit not yet supported by tflite.
            ],
        },
        "template": True,
        "rank": (1, 5),
    },
    "conv3d_bias_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                # Quantization to 16x8-bit not yet supported by tflite.
            ],
        },
        "bias": True,
        "template": True,
        "rank": (1, 5),
    },
    "depthwise_conv2d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (
            TBuilder.DepthwiseConv2d,
            TGen.tgDepthwiseConv2d,
            ArgGen.agDepthwiseConv2d,
        ),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "depthwise_conv2d_bias_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (
            TBuilder.DepthwiseConv2dWithBias,
            TGen.tgDepthwiseConv2d,
            ArgGen.agDepthwiseConv2d,
        ),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "bias": True,
        "template": True,
    },
526 "transpose_conv2d_TEMPLATE": {
527 "operands": (1, 1),
528 "build_fcn": (
529 TBuilder.TransposeConv2d,
530 TGen.tgTransposeConv2d,
531 ArgGen.agTransposeConv2d,
532 ),
533 "types": {
534 "tf": [tf.float32],
535 "tflite": [
536 tf.float32,
537 QuantType.CONV_U8_U8,
538 QuantType.CONV_I8_I8,
539 QuantType.CONV_I16_I8,
540 ],
541 },
542 "template": True,
543 },
544 "argmax": {
545 "operands": (1, 0),
546 "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
547 "types": {"tf": TYPE_F},
548 },
549 "avg_pool2d": {
550 "operands": (1, 0),
551 "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
552 "types": {
553 "tf": TYPE_F,
554 "tflite": list(
555 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
556 ),
557 },
558 },
559 "max_pool2d": {
560 "operands": (1, 0),
561 "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
562 "types": {
563 "tf": TYPE_F,
564 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
565 # ALL_I16 not supported yet
566 # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
567 # QI16 is missing from MaxPoolOperandAndResultConstraints
568 # If adding QI16 back this test can run through.
569 },
570 },
571 "reshape": {
572 "operands": (1, 0),
573 "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
574 "types": TYPE_FI,
575 },
576 "transpose": {
577 "operands": (1, 0),
578 "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
579 "types": TYPE_FI,
580 },
581 "slice": {
582 "operands": (1, 0),
583 "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
584 "types": TYPE_FI,
585 },
586 "strided_slice": {
587 "operands": (1, 0),
588 "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
589 "types": TYPE_FI,
590 },
591 "select": {
592 "operands": (3, 0),
593 "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
594 "types": TYPE_FI,
595 },
596 "addn": {
597 "operands": (4, 0),
598 "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
599 "types": TYPE_FI,
600 },
601 "concatv2": {
602 "operands": (4, 0),
603 "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
604 "types": TYPE_FI,
605 },
606 "stack": {
607 "operands": (4, 0),
608 "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
609 "types": TYPE_FI,
610 },
611 "unstack": {
612 "operands": (1, 0),
613 "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
614 "types": TYPE_F,
615 },
    "mirrorpad": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.MirrorPad, TGen.tgBasic, ArgGen.agMirrorPad),
        "types": TYPE_FI,
    },
    "pad": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
        "types": TYPE_F,
    },
    "expand_dims": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
        "types": TYPE_FI,
    },
    "shape": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "rank": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "fill": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
        "types": TYPE_FI,
    },
    "elu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "softmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "log_softmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "matmul": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F
                + [QuantType.ALL_U8, QuantType.ALL_I8]
                # 16-bit matmul fails to convert
            ),
        },
    },
678 "add_scalar": {
679 "operands": (1, 0),
680 "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
681 "types": TYPE_F,
682 },
683 "add_1d": {
684 "operands": (2, 0),
685 "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
686 "types": TYPE_F,
687 },
688 "split": {
689 "operands": (1, 0),
690 "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
691 "types": TYPE_FI,
692 },
693 "tile": {
694 "operands": (1, 0),
695 "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
696 "types": TYPE_FI,
697 },
698 "reverse": {
699 "operands": (1, 0),
700 "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
701 "types": {"tf": TYPE_FI},
702 },
703 "gather": {
704 "operands": (1, 0),
705 "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
706 "types": TYPE_FI,
707 },
708 "gather_nd": {
709 "operands": (1, 0),
710 "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
711 "types": TYPE_FI,
712 },
713 "scatter_nd": {
714 "operands": (1, 0),
715 "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
716 "types": TYPE_FI,
717 },
718 "space_to_batch": {
719 "operands": (1, 0),
720 "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
721 "types": TYPE_F,
722 },
723 "batch_to_space": {
724 "operands": (1, 0),
725 "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
726 "types": TYPE_F,
727 },
728 "space_to_depth": {
729 "operands": (1, 0),
730 "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
731 "types": TYPE_F,
732 },
733 "depth_to_space": {
734 "operands": (1, 0),
735 "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
736 "types": TYPE_F,
737 },
738 "one_hot": {
739 "operands": (3, 1),
740 "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
741 "types": TYPE_FI,
742 },
743 "fakequant": {
744 "operands": (1, 0),
745 "build_fcn": (
746 TBuilder.Fakequant,
747 TGen.tgBasic,
748 ArgGen.agFakequant,
749 ),
750 "types": {"tf": TYPE_F},
751 },
752 "resize_nearest": {
753 "operands": (1, 0),
754 "build_fcn": (TBuilder.ResizeNearest, TGen.tgPooling, ArgGen.agNone),
755 "types": {
756 "tf": TYPE_F,
757 "tflite": list(
758 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
759 ),
760 },
761 },
762 "resize_bilinear": {
763 "operands": (1, 0),
764 "build_fcn": (TBuilder.ResizeBilinear, TGen.tgPooling, ArgGen.agNone),
765 "types": {
766 "tf": TYPE_F,
767 "tflite": list(
768 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
769 ),
770 },
771 },
TatWai Chongf7326092022-06-08 12:17:14 -0700772 "resize_bilinear_v1_align_corners": {
773 "operands": (1, 0),
774 "build_fcn": (
775 TBuilder.ResizeBilinearV1AlignCorners,
776 TGen.tgPooling,
777 ArgGen.agNone,
778 ),
779 "types": {
780 "tf": TYPE_F,
781 "tflite": list(
782 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
783 ),
784 },
785 },
786 "resize_bilinear_v1_none": {
787 "operands": (1, 0),
788 "build_fcn": (TBuilder.ResizeBilinearV1None, TGen.tgPooling, ArgGen.agNone),
789 "types": {
790 "tf": TYPE_F,
791 "tflite": list(
792 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
793 ),
794 },
795 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000796 "left_shift": {
797 "operands": (1, 0),
798 "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
799 "types": {"tf": [tf.int32]},
800 },
801 "right_shift": {
802 "operands": (1, 0),
803 "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
804 "types": {
805 "tf": [
806 tf.int32,
807 ]
808 },
809 },
810}

# Shapes to be tested; default can be overwritten
shape_list = [
    (1,),
    (64,),
    (14, 19),
    (13, 21, 3),
    (1, 4, 4, 4),
    (1, 8, 4, 17),
    (1, 4, 8, 19),
    (1, 32, 32, 8),
    (1, 7, 7, 9),
    (2, 2, 7, 7, 2),
    (1, 4, 8, 21, 17),
    (3, 32, 16, 16, 5),
]


def gen_rand_shapes(args):
    """Overwrite the global shape list with a new list of random shapes"""
    global shape_list

    rng = np.random.default_rng(args.random_seed)

    # Don't let things get too big... cap the maximum volume, but let
    # an individual dimension be 1..47
    max_total_volume = 32 * 32 * 4

    shape_list = []
    # Only iterate over ranks 2, 3, 4, and 5
    for rank in range(2, 6):
        for n in range(args.random_shapes):
            new_shape = rng.integers(1, 48, size=rank)

            # Set the batch dimension on 4D or 5D objects to 1
            if rank == 4 or rank == 5:
                new_shape[0] = 1

            # Limit the total shape volume and throw out any
            # shapes that wouldn't leave at least size=2 in some non-batch dimension
            volume = 1
            skip_shape = False
            for i in range(rank):

                volume *= new_shape[i]

                # Reduce the shape, while it's larger than the maximum volume
                while volume > max_total_volume:
                    new_shape[i] = new_shape[i] // 2
                    volume = volume // 2

                    # Now an untenable dimension size? Skip this one.
                    if new_shape[i] < 1:
                        skip_shape = True

            if not skip_shape:
                shape_list.append(tuple(new_shape))


# Construct, run and save a whole tensorflow tf.function to a protobuf file,
# or convert it to .tflite if it's a quantized unit test
def run_unit_test(
    op_name,
    args,
    test_dir,
    curr_shape,
    addl_args,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
):

    try:
        op = TF_OP_LIST[op_name]
        op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

        # Get and seed a random number generator for this test
        rng = np.random.default_rng(seed)

        # return placeholders=(str: name, np.array: value)
        #        consts=(str: name, np.array: value)
        placeholders, consts = tensor_gen_fcn(op, curr_shape, dtype, rng)

        # if the test doesn't have any placeholders/consts, terminate early
        if len(placeholders) == 0 and len(consts) == 0:
            return True

        if not args.quiet:
            print(" {} ".format(test_dir))

        try:
            os.mkdir(test_dir)
        except FileExistsError:
            pass

        const_nodes = [value for name, value in consts]

        num_placeholders = len(placeholders)
        # if the test is quantized, create tensor quantization metadata for
        # each input tensor, based on the quantized type
        if quantized_inference_dtype:
            is_quantized = True
            # TODO: support INT8 IFM x INT4 weight later
            if quantized_inference_dtype == QuantType.ALL_U8:
                qzero = [128] * num_placeholders
                numpy_dtype = [np.uint8] * num_placeholders
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.ALL_I8:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int8] * num_placeholders
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.ALL_I16:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int16] * num_placeholders
                tflite_inference_dtype = tf.int16
            elif quantized_inference_dtype == QuantType.CONV_U8_U8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [128] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.uint8, np.uint8]
                else:
                    numpy_dtype = [np.uint8, np.uint8, np.int32]
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.CONV_I8_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [0] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.int8, np.int8]
                else:
                    numpy_dtype = [np.int8, np.int8, np.int32]
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.CONV_I16_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                if num_placeholders == 2:
                    qzero = [0, 0]
                    numpy_dtype = [np.int16, np.int8]
                else:
                    qzero = [0, 0, 0]
                    numpy_dtype = [
                        np.int16,
                        np.int8,
                        np.int64,
                    ]  # np.int64 to represent a 40-bit accumulator
                tflite_inference_dtype = tf.int16
            else:
                raise Exception(
                    "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
                )

        else:
            is_quantized = False

        tf_model_filename = None
        tf_result_npy_filename = None
        tf_result_name = None

        tflite_model_filename = None
        tflite_result_npy_filename = None
        tflite_result_name = None

        placeholder_names = []
        placeholder_vals = []
        placeholder_signatures = ()
        placeholder_npy_filenames = []
        placeholder_shapes = []

        for idx, (name, val) in enumerate(placeholders):
            placeholder_names.append(name)
            placeholder_signatures = placeholder_signatures + (
                tf.TensorSpec(shape=val.shape, dtype=val.dtype, name=name),
            )
            placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
            placeholder_shapes.append(val.shape)

        # Get test builder class
        fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
        concrete_function = tf.function(input_signature=placeholder_signatures)(
            fcn_node.eval
        ).get_concrete_function()
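        # The traced concrete function is the graph under test: it is written
        # out as a .pb and/or handed to the TFLite converter below.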

        if is_quantized:

            assert dtype is tf.float32, "quantized test must come from float32 graph"

            # 1. Quantize the float placeholder npy values to feed the graph
            for idx, (name, val) in enumerate(placeholders):

                # we use np.amin()/np.amax() to determine the dynamic range
                # for the quantized test
                zeropoint = 0
                scale = 1.0
                if numpy_dtype[idx] != np.int64:
                    qmin = np.iinfo(numpy_dtype[idx]).min
                    qmax = np.iinfo(numpy_dtype[idx]).max
                    num_bits = np.iinfo(numpy_dtype[idx]).bits
                # 40-bit is represented as np.int64
                else:
                    num_bits = 40
                    qmin = -(1 << num_bits)
                    qmax = (1 << num_bits) - 1

                min_val = np.amin(val)
                max_val = np.amax(val)

                # for a single-value tensor, we set scale equal to abs(value),
                # and fix zeropoint to 128
                # if val > 0, it'll be represented as 129,
                #    where val = (129 - 128) * val
                # if val < 0, it'll be represented as 127,
                #    where val = (127 - 128) * (-val)
                # if val == 0, it'll be represented as 128, with range [-128.0, 128.0]
                # and let quantized 1 represent the value
                # also adjust the effective min/max accordingly
                if max_val == min_val:
                    if max_val != 0:
                        scale = abs(max_val)
                    else:
                        scale = 1.0
                    min_val = float(qmin - qzero[idx]) * scale
                    max_val = float(qmax - qzero[idx]) * scale
                else:
                    scale = (max_val - min_val) / float(qmax - qmin)
                    zeropoint = int(round((-min_val) / scale)) + qmin
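                    # i.e. the affine mapping real ~= (q - zeropoint) * scale,
                    # with the zeropoint chosen so that min_val maps onto qmin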

                # run through tf.fakequant first so the quantization error is aligned
                fakequant_val = tf.quantization.fake_quant_with_min_max_args(
                    val,
                    min=min_val,
                    max=max_val,
                    num_bits=num_bits,
                    name="gen_quant_npy",
                )

                quant_val = np.round(fakequant_val / scale).astype(np.int32) + zeropoint

                # in a few unit tests after the TF hash of May 2020, this quantized
                # value can exceed the [0, 255] range for some reason
                saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])

                # save all quantized tensors as np.int32,
                # since the TOSA numpy C++ API only supports int32
                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]),
                    saved_val.astype(np.int32),
                    False,
                )

                placeholder_vals.append(tf.convert_to_tensor(saved_val))

            # 2. Convert the model to a quantized TFLite flatbuffer
            module = tf.Module()
            converter = tf.lite.TFLiteConverter.from_concrete_functions(
                [concrete_function], module
            )
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.experimental_new_converter = True

            # use MLIR-based post-quantizer
            converter.experimental_new_quantizer = True

            flag = (
                tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8  # noqa: E501
            )
            if tflite_inference_dtype == tf.int16:
                converter.target_spec.supported_ops = [flag]

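            # The TFLite converter feeds these random float samples through the
            # graph to calibrate the ranges for post-training quantization.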
            def input_stats():
                for i in range(0, args.num_samples):
                    a = [
                        TGen.getRand(shape, tf.float32, rng)
                        for shape in placeholder_shapes
                    ]
                    yield a

            converter.representative_dataset = input_stats
            converter.inference_input_type = tflite_inference_dtype
            converter.inference_output_type = tflite_inference_dtype

            tflite_model = converter.convert()

            tflite_model_filename = "model.tflite"

            # Write out converted model to disk
            with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                f.write(tflite_model)

        else:  # is_quantized is False

            # 1. Save out the numpy arrays directly
            for idx, (name, val) in enumerate(placeholders):
                placeholder_vals.append(tf.convert_to_tensor(val))
                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
                )

            # 2.a Save out .pb if the framework list includes tensorflow
            if "tf" not in excluded_framework_list:
                # Write out graph as protobuf to disk
                tf_model_filename = "model.pb"
                tf.io.write_graph(
                    concrete_function.graph, test_dir, tf_model_filename, True
                )

            # 2.b Save out .tflite if the framework list includes tflite
            if "tflite" not in excluded_framework_list:
                # Convert the model to TFLite flatbuffer
                module = tf.Module()
                converter = tf.lite.TFLiteConverter.from_concrete_functions(
                    [concrete_function], module
                )

                converter.experimental_new_converter = True

                # Even a non-quantized int32 test needs these set to tf.float32
                converter.inference_input_type = tf.float32
                converter.inference_output_type = tf.float32
                tflite_model = converter.convert()

                # Write out converted model to disk
                tflite_model_filename = "model.tflite"
                with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                    f.write(tflite_model)

        # Get the TF reference result if .pb is specified
        if tf_model_filename:
            tf_result_npy_filename = "tf_result.npy"
            tf_result = concrete_function(*placeholder_vals)
            np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)

            tf_result_name = result_name

        # Get the TFLite inference result if .tflite is specified
        if tflite_model_filename:
            tflite_result_npy_filename = "tflite_result.npy"

            ops_with_optimized_only_kernel = ["elu", "ceil", "gather"]

            if args.tflite_kernel_mode == "optimized" or (
                op_name in ops_with_optimized_only_kernel
            ):
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename)
                )
            elif args.tflite_kernel_mode == "reference":
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename),
                    experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
                )
            else:
                assert 0, "unknown tflite interpreter mode {}".format(
                    args.tflite_kernel_mode
                )
            interpreter.allocate_tensors()

            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()

            assert len(input_details) == len(
                placeholder_vals
            ), "number of placeholders mismatch"

            for idx, val in enumerate(placeholder_vals):
                interpreter.set_tensor(input_details[idx]["index"], val.numpy())

            interpreter.invoke()
            tflite_result = interpreter.get_tensor(output_details[0]["index"])

            np.save(
                os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
            )

            # The result tensor name changes after converting to the TFLite
            # flatbuffer, so overwrite the information from the TFLite model
            # directly.  Assume a single result tensor for now.
            tflite_result_name = output_details[0]["name"]

        # Write out test descriptor
        write_test_json(
            filename=os.path.join(test_dir, "test.json"),
            tf_model_filename=tf_model_filename,
            tf_result_npy_filename=tf_result_npy_filename,
            tf_result_name=tf_result_name,
            tflite_model_filename=tflite_model_filename,
            tflite_result_npy_filename=tflite_result_npy_filename,
            tflite_result_name=tflite_result_name,
            ifm_name=placeholder_names,
            ifm_file=placeholder_npy_filenames,
            ifm_shape=placeholder_shapes,
            framework_exclusions=excluded_framework_list,
            quantized=is_quantized,
        )
    except Exception as e:
        msg = "Error running task: {}".format(e)
        print(msg)
        print(
            "".join(
                traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
            )
        )
        return False
    return True


def build_const_net(
    args,
    curr_shape,
    op_name,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
    rng,
    filter,
    unit_test_args,
):

    if quantized_inference_dtype:
        quant_dtype = get_tf_dtype(quantized_inference_dtype)
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
    else:
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
    test_dir = os.path.join(args.output_dir, test_dir)

    # If the operator has an additional function to generate arguments, call it
    # here and iterate through the argument list that it generates
    op = TF_OP_LIST[op_name]
    op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

    try:
        rank_lo, rank_hi = op["rank"]
    except KeyError:
        # Default the testing rank range to (1, 4).
        rank_lo = 1
        rank_hi = 4

    if len(curr_shape) not in range(rank_lo, rank_hi + 1):
        return

    addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
    for desc, addl_args in addl_args_tuple:
        # Only filter on the full test_name, not the output directory
        _, test_name = os.path.split(test_dir + desc)
        if not filter or filter.search(test_name):
            unit_test_args.append(
                [
                    op_name,
                    args,
                    test_dir + desc,
                    curr_shape,
                    addl_args,
                    dtype,
                    excluded_framework_list,
                    quantized_inference_dtype,
                    result_name,
                    seed,
                ]
            )


# Python's hash is not reproducible across runs, so create a hash for our purpose
def op_name_hash(op_name):
    result = 0xDEADBEEF
    for ch in op_name:
        if result & 1:
            result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
        else:
            result = (ord(ch) << 24) ^ (result >> 1)

    return result


def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):

    if not args.quiet:
        print("Generating tests for {} ".format(op_name))

    op = TF_OP_LIST[op_name]

    # Seed the RNG so that we get the same random tests for each operator each time
    # If the number of tests for a given generation function changes, the tests
    # for that operator may also change accordingly, but this will at least keep
    # down churn across operators.

    bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
        np.int32
    ).max
    rng = np.random.default_rng(bounded_hash_val)

    # op["types"] is either a list of data types to test in both frameworks,
    # or a dictionary with 'tf' and 'tflite' as keys and the data types we
    # want to test under each framework as values

    if isinstance(op["types"], dict):
        try:
            tf_dtypes = op["types"]["tf"]
        except KeyError:
            tf_dtypes = []
        try:
            tflite_dtypes = op["types"]["tflite"]
        except KeyError:
            tflite_dtypes = []
    elif isinstance(op["types"], list):
        tf_dtypes = op["types"]
        tflite_dtypes = op["types"]

    tf_nonquantized_dtypes = tf_dtypes  # tf doesn't support quantized data types
    tflite_quantized_dtypes = []
    tflite_nonquantized_dtypes = []
    for dtype in tflite_dtypes:
        if isinstance(dtype, QuantType):
            tflite_quantized_dtypes.append(dtype)
        else:
            tflite_nonquantized_dtypes.append(dtype)

    nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
        set(tflite_nonquantized_dtypes)
    )
    nonquantized_dtypes = list(nonquantized_dtypes_set)
    quantized_dtypes = tflite_quantized_dtypes

    # populate non-quantized unit test arguments
    for dtype in nonquantized_dtypes:

        excluded_framework_set = set(ALL_FRAMEWORKS)
        if dtype in tf_nonquantized_dtypes:
            excluded_framework_set.remove("tf")
        if dtype in tflite_nonquantized_dtypes:
            excluded_framework_set.remove("tflite")
        excluded_framework_list = list(excluded_framework_set)

        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                dtype,
                excluded_framework_list,
                None,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    # populate quantized unit test arguments
    # must exclude 'tf', with the source dtype being tf.float32
    for dtype in quantized_dtypes:
        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                tf.float32,
                ["tf"],
                dtype,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    return unit_test_args


def createDynamicOpLists():
    """The templated operators are conv2d-style operators with a number of kernel
    sizes.  Since the operator is unchanged, we generate the range of kernel
    sizes here in this loop and remove the original templates from the list.

    This could be expanded to non-conv2d-style operators in the future."""

    # Dynamically create op lists for convolutions with a list of kernel sizes
    KERNELS = [
        [1, 1],
        [3, 3],
        [5, 5],
    ]
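    # e.g. the "conv2d" template below expands into conv2d_1x1, conv2d_3x3 and
    # conv2d_5x5 tests, each with "filter" set to the corresponding kernel size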

    # dim = [D, H, W]
    KERNELS_3D = [
        [1, 1, 1],
        [2, 3, 3],
        [3, 5, 5],
    ]

    TEMPLATE_LIST = [
        "conv2d",
        "conv2d_bias",
        "conv2d_relu",
        "conv2d_relu6",
        "conv2d_relu_n1_to_1",
        "conv2d_tanh",
        "depthwise_conv2d",
        "depthwise_conv2d_bias",
        "transpose_conv2d",
    ]

    TEMPLATE_LIST_CONV3D = [
        "conv3d",
        "conv3d_bias",
    ]

    for t in TEMPLATE_LIST:
        for k in KERNELS:
            testName = "{}_{}x{}".format(t, k[0], k[1])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # The 2D template loop above only handles 2D kernels, so the conv3d
    # templates are expanded separately with 3D kernel sizes.
    for t in TEMPLATE_LIST_CONV3D:
        for k in KERNELS_3D:
            testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # Delete any templates after having created any dynamic ops
    # This is a two-pass operation because it's bad practice to delete
    # keys from dictionaries while iterating
    keyList = []
    for k in TF_OP_LIST:
        try:
            if TF_OP_LIST[k]["template"]:
                keyList.append(k)
                continue
        except KeyError:
            pass

    for k in keyList:
        del TF_OP_LIST[k]


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--seed", dest="random_seed", default=42, type=int, help="Random seed"
    )
    parser.add_argument(
        "--random-shapes",
        dest="random_shapes",
        default=0,
        type=int,
        help=(
            "Use N random shapes of each rank for generating tests, "
            "seeded with random seed"
        ),
    )
    parser.add_argument(
        "-o",
        "--output-dir",
        dest="output_dir",
        default=".",
        type=str,
        help="Test output directory path prefix",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        dest="quiet",
        default=False,
        action="store_true",
        help="Do not print test names",
    )
    parser.add_argument(
        "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
    )
    parser.add_argument(
        "-m",
        "--tflite-kernel-mode",
        dest="tflite_kernel_mode",
        type=str,
        choices=["reference", "optimized"],
        default="reference",
        help="TFLite interpreter kernel mode",
    )
    parser.add_argument(
        "--num-samples",
        dest="num_samples",
        default=200,
        type=int,
        help="Number of input samples for post-training quantization",
    )
    parser.add_argument(
        "--filter",
        dest="filter",
        default="",
        type=str,
        help="Filter test names by this expression",
    )
    args = parser.parse_args()

    # Turn the filter into a re object if present
    filter = None
    if args.filter != "":
        filter = re.compile(args.filter)

    # Autodetect CPU count
    if args.jobs <= 0:
        args.jobs = os.cpu_count()

    # Disable TF info messages
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    try:
        os.makedirs(args.output_dir)
    except FileExistsError:
        pass

    if args.random_shapes:
        gen_rand_shapes(args)

    # Build dynamic ops
    createDynamicOpLists()

    # Generate the test list and arguments to run_unit_test()
    unit_test_args = []

    for op in TF_OP_LIST:
        generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)

    errors = 0
    for t in unit_test_args:
        if not run_unit_test(*t):
            errors = errors + 1

    if not args.quiet:
        print("\nAll tasks done - with {} errors".format(errors))

    return 1 if errors else 0


if __name__ == "__main__":
    exit(main())