#!/usr/bin/env python3
# Copyright (c) 2020-2022, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import traceback

import numpy as np

#  Level | Level for Humans | Level Description
# -------|------------------|------------------------------------
#  0     | DEBUG            | [Default] Print all messages
#  1     | INFO             | Filter out INFO messages
#  2     | WARNING          | Filter out INFO & WARNING messages
#  3     | ERROR            | Filter out all messages
# Filter out TensorFlow debug messages except errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
import tensorflow as tf  # noqa: E402
from frameworks.write_test_json import write_test_json  # noqa: E402
from frameworks.arg_gen import ArgGen  # noqa: E402
from frameworks.tensor_gen import TGen  # noqa: E402
from frameworks.test_builder import TBuilder  # noqa: E402
from frameworks.test_gen_utils import (  # noqa: E402
    QuantType,
    get_tf_dtype,
    get_shape_str,
)  # noqa: E402
from tensorflow.lite.python.interpreter import OpResolverType  # noqa: E402

# All of the supported frameworks
ALL_FRAMEWORKS = ["tf", "tflite"]

# Lists of different data types
TYPE_F = [tf.float32]
TYPE_I = [tf.int32]
TYPE_FI = [tf.float32, tf.int32]
TYPE_B = [tf.bool]
TYPE_FIB = [tf.float32, tf.int32, tf.bool]
TYPE_H = [tf.float16]
TYPE_FH = [tf.float32, tf.float16]
TYPE_FHI = [tf.float32, tf.float16, tf.int32]
TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]

# The list of operator tests
# Each dictionary entry for an op is a dictionary with the following required members:
#   'operands':  tuple (number_of_placeholder_tensors, number_of_constant_tensors)
#   'build_fcn': tuple (Test builder function, Tensor generator function,
#                       Argument generator function)
#   'types':     list of TensorFlow types that should be tested for this op
#                OR
#                a dictionary of {'framework_name': [type_list]} for cases where only
#                a subset of the types should be tested in each framework.  This can
#                also be used to restrict an operator to a particular framework.
#
# And optional members:
#   'template': boolean (indicates that this is a templated op which gets further
#               processing in createDynamicOpLists)
#   'bias':     boolean indicating that there is a bias component to be generated
#   'qtypes':   list of QuantType quantized types to generate for this op
#   'rank':     tuple (lowest rank, highest rank). Dimension range of the input tensor.

TF_OP_LIST = {
    "add": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "sub": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # QuantType.ALL_I16 fails in TFLite conversion
        },
    },
    "mul": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "exp": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "rcp": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "relu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "relu1": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": [],
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "relu6": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "leaky_relu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "gelu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            # Need compiler support for tf.Erf.
            # "tf": TYPE_F,
            "tflite": list(
                # Only float32, int8 and uint8 supported currently
                TYPE_F
                + [QuantType.ALL_U8, QuantType.ALL_I8]
            ),
        },
    },
    "concat": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
        "types": TYPE_FI,
    },
    "bitwise_and": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_or": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_not": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_xor": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "logical_and": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "logical_or": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "logical_not": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "reduce_any": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_B,
    },
    "reduce_all": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {"tf": TYPE_B},
    },
    "reduce_min": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "reduce_max": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "reduce_sum": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_F,
            # v2 converter doesn't recognize quantized reduce_sum
            # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
            "tflite": TYPE_F,
        },
    },
    "reduce_mean": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "reduce_product": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_F,
    },
    "min": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "max": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "pow": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
        # Technically, integer is supported, but only for positive exponents.
        # Needs a random argument generator.
        "types": TYPE_F,
    },
    "abs": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "ceil": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "floor": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "log": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "negate": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "rsqrt": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rsqrt, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "sigmoid": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "tanh": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "square": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "squared_difference": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_F,
    },
    "equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "greater_equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "greater": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "less": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "less_equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "conv2d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu6_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu_n1_to_1_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    # This test is converted as:
    # tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
    # TODO: any way to generate tfl.conv2d(){fused_activation_function="TANH"}?
    "conv2d_tanh_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_bias_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "bias": True,
        "template": True,
    },
    "conv3d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                # Quantization to 16x8-bit not yet supported by tflite.
            ],
        },
        "template": True,
        "rank": (1, 5),
    },
    "conv3d_bias_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                # Quantization to 16x8-bit not yet supported by tflite.
            ],
        },
        "bias": True,
        "template": True,
        "rank": (1, 5),
    },
    "depthwise_conv2d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (
            TBuilder.DepthwiseConv2d,
            TGen.tgDepthwiseConv2d,
            ArgGen.agDepthwiseConv2d,
        ),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "depthwise_conv2d_bias_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (
            TBuilder.DepthwiseConv2dWithBias,
            TGen.tgDepthwiseConv2d,
            ArgGen.agDepthwiseConv2d,
        ),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "bias": True,
        "template": True,
    },
    "transpose_conv2d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (
            TBuilder.TransposeConv2d,
            TGen.tgTransposeConv2d,
            ArgGen.agTransposeConv2d,
        ),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "argmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
        "types": {"tf": TYPE_F},
    },
    "avg_pool2d": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "max_pool2d": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
        "types": {
            "tf": TYPE_F,
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # ALL_I16 not supported yet
            # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
            # QI16 is missing from MaxPoolOperandAndResultConstraints.
            # If QI16 is added back, this test can run through.
        },
    },
    "reshape": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
        "types": TYPE_FI,
    },
    "transpose": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
        "types": TYPE_FI,
    },
    "slice": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
        "types": TYPE_FI,
    },
    "strided_slice": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
        "types": TYPE_FI,
    },
    "select": {
        "operands": (3, 0),
        "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "addn": {
        "operands": (4, 0),
        "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "concatv2": {
        "operands": (4, 0),
        "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
        "types": TYPE_FI,
    },
    "stack": {
        "operands": (4, 0),
        "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
        "types": TYPE_FI,
    },
    "unstack": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
        "types": TYPE_F,
    },
    "pad": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
        "types": TYPE_F,
    },
    "expand_dims": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
        "types": TYPE_FI,
    },
    "shape": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "rank": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "fill": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
        "types": TYPE_FI,
    },
    "elu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "softmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "log_softmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "matmul": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F
                + [QuantType.ALL_U8, QuantType.ALL_I8]
                # 16-bit matmul fails to convert
            ),
        },
    },
    "add_scalar": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "add_1d": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "split": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
        "types": TYPE_FI,
    },
    "tile": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
        "types": TYPE_FI,
    },
    "reverse": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
        "types": {"tf": TYPE_FI},
    },
    "gather": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
        "types": TYPE_FI,
    },
    "gather_nd": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
        "types": TYPE_FI,
    },
    "scatter_nd": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
        "types": TYPE_FI,
    },
    "space_to_batch": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
        "types": TYPE_F,
    },
    "batch_to_space": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
        "types": TYPE_F,
    },
    "space_to_depth": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
        "types": TYPE_F,
    },
    "depth_to_space": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
        "types": TYPE_F,
    },
    "one_hot": {
        "operands": (3, 1),
        "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
        "types": TYPE_FI,
    },
    "fakequant": {
        "operands": (1, 0),
        "build_fcn": (
            TBuilder.Fakequant,
            TGen.tgBasic,
            ArgGen.agFakequant,
        ),
        "types": {"tf": TYPE_F},
    },
    "resize_nearest": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ResizeNearest, TGen.tgPooling, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "resize_bilinear": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ResizeBilinear, TGen.tgPooling, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "resize_bilinear_v1_align_corners": {
        "operands": (1, 0),
        "build_fcn": (
            TBuilder.ResizeBilinearV1AlignCorners,
            TGen.tgPooling,
            ArgGen.agNone,
        ),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "resize_bilinear_v1_none": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ResizeBilinearV1None, TGen.tgPooling, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "left_shift": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
        "types": {"tf": [tf.int32]},
    },
    "right_shift": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
        "types": {
            "tf": [
                tf.int32,
            ]
        },
    },
}

# Shapes to be tested; the default list can be overwritten with random shapes
shape_list = [
    (1,),
    (64,),
    (14, 19),
    (13, 21, 3),
    (1, 4, 4, 4),
    (1, 8, 4, 17),
    (1, 4, 8, 19),
    (1, 32, 32, 8),
    (1, 7, 7, 9),
    (2, 2, 7, 7, 2),
    (1, 4, 8, 21, 17),
    (3, 32, 16, 16, 5),
]


def gen_rand_shapes(args):
    """Overwrite the global shape list with a new list of random shapes"""
    global shape_list

    rng = np.random.default_rng(args.random_seed)

    # Don't let things get too big... cap the maximum volume, but let
    # an individual dimension be 1..47
    max_total_volume = 32 * 32 * 4

    shape_list = []
    # Only iterate over ranks 2, 3, 4, and 5
    for rank in range(2, 6):
        for n in range(args.random_shapes):
            new_shape = rng.integers(1, 48, size=rank)

            # Set the batch dimension on 4D or 5D objects to 1
            if rank == 4 or rank == 5:
                new_shape[0] = 1

            # Limit the total shape volume and throw out any
            # shapes that wouldn't leave at least size=2 in some non-batch dimension
            volume = 1
            skip_shape = False
            for i in range(rank):

                volume *= new_shape[i]

                # Reduce the shape, while it's larger than the maximum volume
                while volume > max_total_volume:
                    new_shape[i] = new_shape[i] // 2
                    volume = volume // 2

                    # Now an untenable dimension size?  Skip this one.
                    if new_shape[i] < 1:
                        skip_shape = True

            if not skip_shape:
                shape_list.append(tuple(new_shape))


# Construct, run and save a whole TensorFlow tf.function to a protobuf file,
# or convert it to a .tflite flatbuffer if it is a quantized unit test
def run_unit_test(
    op_name,
    args,
    test_dir,
    curr_shape,
    addl_args,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
):

    try:
        op = TF_OP_LIST[op_name]
        op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

        # Get and seed a random number generator for this test
        rng = np.random.default_rng(seed)

        # The tensor generator returns placeholders=(str: name, np.array: value)
        # and consts=(str: name, np.array: value)
        placeholders, consts = tensor_gen_fcn(op, curr_shape, dtype, rng)

        # If the test doesn't have any placeholders or consts, terminate early
        if len(placeholders) == 0 and len(consts) == 0:
            return True

        if not args.quiet:
            print(" {} ".format(test_dir))

        try:
            os.mkdir(test_dir)
        except FileExistsError:
            pass

        const_nodes = [value for name, value in consts]

        num_placeholders = len(placeholders)
        # If the test is quantized, create tensor quantization metadata info for
        # each input tensor, based on the different quantized types
        if quantized_inference_dtype:
            is_quantized = True
            # TODO: support INT8 IFM x INT4 weight later
            if quantized_inference_dtype == QuantType.ALL_U8:
                qzero = [128] * num_placeholders
                numpy_dtype = [np.uint8] * num_placeholders
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.ALL_I8:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int8] * num_placeholders
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.ALL_I16:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int16] * num_placeholders
                tflite_inference_dtype = tf.int16
            elif quantized_inference_dtype == QuantType.CONV_U8_U8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [128] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.uint8, np.uint8]
                else:
                    numpy_dtype = [np.uint8, np.uint8, np.int32]
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.CONV_I8_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [0] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.int8, np.int8]
                else:
                    numpy_dtype = [np.int8, np.int8, np.int32]
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.CONV_I16_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                if num_placeholders == 2:
                    qzero = [0, 0]
                    numpy_dtype = [np.int16, np.int8]
                else:
                    qzero = [0, 0, 0]
                    numpy_dtype = [
                        np.int16,
                        np.int8,
                        np.int64,
                    ]  # np.int64 to represent the 40-bit accumulator
                tflite_inference_dtype = tf.int16
            else:
                raise Exception(
                    "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
                )

        else:
            is_quantized = False

        tf_model_filename = None
        tf_result_npy_filename = None
        tf_result_name = None

        tflite_model_filename = None
        tflite_result_npy_filename = None
        tflite_result_name = None

        placeholder_names = []
        placeholder_vals = []
        placeholder_signatures = ()
        placeholder_npy_filenames = []
        placeholder_shapes = []

        for idx, (name, val) in enumerate(placeholders):
            placeholder_names.append(name)
            placeholder_signatures = placeholder_signatures + (
                tf.TensorSpec(shape=val.shape, dtype=val.dtype, name=name),
            )
            placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
            placeholder_shapes.append(val.shape)

        # Instantiate the test builder and trace it into a concrete tf.function
        fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
        concrete_function = tf.function(input_signature=placeholder_signatures)(
            fcn_node.eval
        ).get_concrete_function()

        if is_quantized:

            assert dtype is tf.float32, "quantized test must come from float32 graph"

            # 1. Quantize the float placeholder npy to feed the graph
            for idx, (name, val) in enumerate(placeholders):

                # We use np.amin()/np.amax() to determine the dynamic range
                # for the quantized test
                zeropoint = 0
                scale = 1.0
                if numpy_dtype[idx] != np.int64:
                    qmin = np.iinfo(numpy_dtype[idx]).min
                    qmax = np.iinfo(numpy_dtype[idx]).max
                    num_bits = np.iinfo(numpy_dtype[idx]).bits
                # 40 bit is represented as np.int64
                else:
                    num_bits = 40
                    qmin = -(1 << num_bits)
                    qmax = (1 << num_bits) - 1

                min_val = np.amin(val)
                max_val = np.amax(val)

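                # Affine quantization represents a real value r as an integer q
                # such that r ~= (q - zeropoint) * scale; the scale and zeropoint
                # below are derived from the observed dynamic range of the tensor
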
                # For a single-value tensor, we set scale equal to abs(value)
                # and fix zeropoint to 128:
                # if val > 0, it'll be represented as 129,
                #   where val = (129 - 128) * val
                # if val < 0, it'll be represented as 127,
                #   where val = (127 - 128) * (-val)
                # if val == 0, it'll be represented as 128, with range [-128.0, 128.0]
                # and let quantized 1 represent the value;
                # also adjust the effective min/max accordingly
                if max_val == min_val:
                    if max_val != 0:
                        scale = abs(max_val)
                    else:
                        scale = 1.0
                    min_val = float(qmin - qzero[idx]) * scale
                    max_val = float(qmax - qzero[idx]) * scale
                else:
                    scale = (max_val - min_val) / float(qmax - qmin)
                    zeropoint = int(round((-min_val) / scale)) + qmin

                # Run through tf.fakequant first to ensure the quantization
                # error is aligned
                fakequant_val = tf.quantization.fake_quant_with_min_max_args(
                    val,
                    min=min_val,
                    max=max_val,
                    num_bits=num_bits,
                    name="gen_quant_npy",
                )

                quant_val = np.round(fakequant_val / scale).astype(np.int32) + zeropoint

                # In a very few unit tests after the May 2020 TF hash, this
                # quantized value exceeds the [0, 255] range for some reason
                saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])

                # Save all quantized tensors as np.int32,
                # since the TOSA numpy C++ API only supports int32
                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]),
                    saved_val.astype(np.int32),
                    False,
                )

                placeholder_vals.append(tf.convert_to_tensor(saved_val))

            # 2. Convert the model to a quantized TFLite flatbuffer
            module = tf.Module()
            converter = tf.lite.TFLiteConverter.from_concrete_functions(
                [concrete_function], module
            )
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.experimental_new_converter = True

            # Use the MLIR-based post-training quantizer
            converter.experimental_new_quantizer = True

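            # 16-bit activations with 8-bit weights need the experimental
            # TFLite builtin ops set enabled on the converter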
            flag = (
                tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8  # noqa: E501
            )
            if tflite_inference_dtype == tf.int16:
                converter.target_spec.supported_ops = [flag]

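            # Representative dataset of random float inputs, used by the
            # converter to calibrate the post-training quantization ranges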
            def input_stats():
                for i in range(0, args.num_samples):
                    a = [
                        TGen.getRand(shape, tf.float32, rng)
                        for shape in placeholder_shapes
                    ]
                    yield a

            converter.representative_dataset = input_stats
            converter.inference_input_type = tflite_inference_dtype
            converter.inference_output_type = tflite_inference_dtype

            tflite_model = converter.convert()

            tflite_model_filename = "model.tflite"

            # Write out the converted model to disk
            with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                f.write(tflite_model)

        else:  # is_quantized is False

            # 1. Save out the numpy arrays directly
            for idx, (name, val) in enumerate(placeholders):
                placeholder_vals.append(tf.convert_to_tensor(val))
                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
                )

            # 2.a Save out a .pb if the framework list includes tensorflow
            if "tf" not in excluded_framework_list:
                # Write out the graph as a protobuf to disk
                tf_model_filename = "model.pb"
                tf.io.write_graph(
                    concrete_function.graph, test_dir, tf_model_filename, True
                )

            # 2.b Save out a .tflite if the framework list includes tflite
            if "tflite" not in excluded_framework_list:
                # Convert the model to a TFLite flatbuffer
                module = tf.Module()
                converter = tf.lite.TFLiteConverter.from_concrete_functions(
                    [concrete_function], module
                )

                converter.experimental_new_converter = True

                # Even for a non-quantized int32 test, this needs to be tf.float32
                converter.inference_input_type = tf.float32
                converter.inference_output_type = tf.float32
                tflite_model = converter.convert()

                # Write out the converted model to disk
                tflite_model_filename = "model.tflite"
                with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                    f.write(tflite_model)

        # Get the TF reference result if a .pb is specified
        if tf_model_filename:
            tf_result_npy_filename = "tf_result.npy"
            tf_result = concrete_function(*placeholder_vals)
            np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)

            tf_result_name = result_name

        # Get the TFLite inference result if a .tflite is specified
        if tflite_model_filename:
            tflite_result_npy_filename = "tflite_result.npy"

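            # These ops only have optimized TFLite kernels, so they are run with
            # the default op resolver regardless of --tflite-kernel-mode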
            ops_with_optimized_only_kernel = ["elu", "ceil", "gather"]

            if args.tflite_kernel_mode == "optimized" or (
                op_name in ops_with_optimized_only_kernel
            ):
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename)
                )
            elif args.tflite_kernel_mode == "reference":
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename),
                    experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
                )
            else:
                assert 0, "unknown tflite interpreter mode {}".format(
                    args.tflite_kernel_mode
                )
            interpreter.allocate_tensors()

            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()

            assert len(input_details) == len(
                placeholder_vals
            ), "number of placeholder mismatch"

            for idx, val in enumerate(placeholder_vals):
                interpreter.set_tensor(input_details[idx]["index"], val.numpy())

            interpreter.invoke()
            tflite_result = interpreter.get_tensor(output_details[0]["index"])

            np.save(
                os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
            )

            # The result tensor name changes after converting to a TFLite flatbuffer,
            # so overwrite the information from the TFLite model directly.
            # Assume a single result tensor for now
            tflite_result_name = output_details[0]["name"]

        # Write out the test descriptor
        write_test_json(
            filename=os.path.join(test_dir, "test.json"),
            tf_model_filename=tf_model_filename,
            tf_result_npy_filename=tf_result_npy_filename,
            tf_result_name=tf_result_name,
            tflite_model_filename=tflite_model_filename,
            tflite_result_npy_filename=tflite_result_npy_filename,
            tflite_result_name=tflite_result_name,
            ifm_name=placeholder_names,
            ifm_file=placeholder_npy_filenames,
            ifm_shape=placeholder_shapes,
            framework_exclusions=excluded_framework_list,
            quantized=is_quantized,
        )
    except Exception as e:
        msg = "Error running task: {}".format(e)
        print(msg)
        print(
            "".join(
                traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
            )
        )
        return False
    return True


def build_const_net(
    args,
    curr_shape,
    op_name,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
    rng,
    filter,
    unit_test_args,
):

    if quantized_inference_dtype:
        quant_dtype = get_tf_dtype(quantized_inference_dtype)
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
    else:
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
    test_dir = os.path.join(args.output_dir, test_dir)

    # If the operator has an additional function to generate arguments, call it
    # here and iterate through the argument list that it generates
    op = TF_OP_LIST[op_name]
    op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

    try:
        rank_lo, rank_hi = op["rank"]
    except KeyError:
        # Default the testing rank range to (1, 4)
        rank_lo = 1
        rank_hi = 4

    if len(curr_shape) not in range(rank_lo, rank_hi + 1):
        return

    addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
    for desc, addl_args in addl_args_tuple:
        # Only filter on the full test_name, not the output directory
        _, test_name = os.path.split(test_dir + desc)
        if not filter or filter.search(test_name):
            unit_test_args.append(
                [
                    op_name,
                    args,
                    test_dir + desc,
                    curr_shape,
                    addl_args,
                    dtype,
                    excluded_framework_list,
                    quantized_inference_dtype,
                    result_name,
                    seed,
                ]
            )


# Python's built-in hash() is not reproducible across runs, so create a simple
# deterministic hash for our purposes
def op_name_hash(op_name):
    result = 0xDEADBEEF
    for ch in op_name:
        if result & 1:
            result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
        else:
            result = (ord(ch) << 24) ^ (result >> 1)

    return result


def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):

    if not args.quiet:
        print("Generating tests for {} ".format(op_name))

    op = TF_OP_LIST[op_name]

    # Seed the RNG so that we get the same random tests for each operator each time.
    # If the number of tests for a given generation function changes, the tests
    # for that operator may also change accordingly, but this will at least keep
    # down churn across operators.

    bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
        np.int32
    ).max
    rng = np.random.default_rng(bounded_hash_val)

    # op["types"] is either a dictionary with 'tf' and 'tflite' as keys and the
    # data types we want to test under each framework as values, or a plain list

    if isinstance(op["types"], dict):
        try:
            tf_dtypes = op["types"]["tf"]
        except KeyError:
            tf_dtypes = []
        try:
            tflite_dtypes = op["types"]["tflite"]
        except KeyError:
            tflite_dtypes = []
    elif isinstance(op["types"], list):
        tf_dtypes = op["types"]
        tflite_dtypes = op["types"]

    tf_nonquantized_dtypes = tf_dtypes  # tf doesn't support quantized data types
    tflite_quantized_dtypes = []
    tflite_nonquantized_dtypes = []
    for dtype in tflite_dtypes:
        if isinstance(dtype, QuantType):
            tflite_quantized_dtypes.append(dtype)
        else:
            tflite_nonquantized_dtypes.append(dtype)

    nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
        set(tflite_nonquantized_dtypes)
    )
    nonquantized_dtypes = list(nonquantized_dtypes_set)
    quantized_dtypes = tflite_quantized_dtypes

    # Populate the non-quantized unit test arguments
    for dtype in nonquantized_dtypes:

        excluded_framework_set = set(ALL_FRAMEWORKS)
        if dtype in tf_nonquantized_dtypes:
            excluded_framework_set.remove("tf")
        if dtype in tflite_nonquantized_dtypes:
            excluded_framework_set.remove("tflite")
        excluded_framework_list = list(excluded_framework_set)

        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                dtype,
                excluded_framework_list,
                None,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    # Populate the quantized unit test arguments;
    # 'tf' must be excluded and the source dtype must be tf.float32
    for dtype in quantized_dtypes:
        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                tf.float32,
                ["tf"],
                dtype,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    return unit_test_args


def createDynamicOpLists():
    """The templated operators are conv2d-style operators with a number of kernel
    sizes.  Since the operator is unchanged, we generate the range of kernel
    sizes here in this loop and remove the original templates from the list.

    This could be expanded to non-conv2d-style operators in the future."""

    # Dynamically create op lists for convolutions with a list of kernel sizes
    KERNELS = [
        [1, 1],
        [3, 3],
        [5, 5],
    ]

    # dim = [D, H, W]
    KERNELS_3D = [
        [1, 1, 1],
        [2, 3, 3],
        [3, 5, 5],
    ]

    TEMPLATE_LIST = [
        "conv2d",
        "conv2d_bias",
        "conv2d_relu",
        "conv2d_relu6",
        "conv2d_relu_n1_to_1",
        "conv2d_tanh",
        "depthwise_conv2d",
        "depthwise_conv2d_bias",
        "transpose_conv2d",
    ]

    TEMPLATE_LIST_CONV3D = [
        "conv3d",
        "conv3d_bias",
    ]

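    # e.g. the "conv2d_TEMPLATE" entry expands into "conv2d_1x1", "conv2d_3x3"
    # and "conv2d_5x5" entries, each carrying the matching "filter" size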
    for t in TEMPLATE_LIST:
        for k in KERNELS:
            testName = "{}_{}x{}".format(t, k[0], k[1])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # The 2D operators above do not support kernels with more than two
    # dimensions, so the conv3d templates are expanded separately here
    for t in TEMPLATE_LIST_CONV3D:
        for k in KERNELS_3D:
            testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # Delete any templates after having created the dynamic ops.
    # This is a two-pass operation because it's bad practice to delete
    # keys from dictionaries while iterating
    keyList = []
    for k in TF_OP_LIST:
        try:
            if TF_OP_LIST[k]["template"]:
                keyList.append(k)
                continue
        except KeyError:
            pass

    for k in keyList:
        del TF_OP_LIST[k]


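# Example invocation (illustrative only; the script name and paths may differ
# in your checkout):
#   ./tosa_verif_framework_generator.py -o frameworks_tests --seed 42 \
#       --random-shapes 4 --filter conv2d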
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--seed", dest="random_seed", default=42, type=int, help="Random seed"
    )
    parser.add_argument(
        "--random-shapes",
        dest="random_shapes",
        default=0,
        type=int,
        help=(
            "Use N random shapes of each rank for generating tests, "
            "seeded with the random seed"
        ),
    )
    parser.add_argument(
        "-o",
        "--output-dir",
        dest="output_dir",
        default=".",
        type=str,
        help="Test output directory path prefix",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        dest="quiet",
        default=False,
        action="store_true",
        help="Do not print test names",
    )
    parser.add_argument(
        "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
    )
    parser.add_argument(
        "-m",
        "--tflite-kernel-mode",
        dest="tflite_kernel_mode",
        type=str,
        choices=["reference", "optimized"],
        default="reference",
        help="TFLite interpreter kernel mode",
    )
    parser.add_argument(
        "--num-samples",
        dest="num_samples",
        default=200,
        type=int,
        help="Number of input samples for post-training quantization",
    )
    parser.add_argument(
        "--filter",
        dest="filter",
        default="",
        type=str,
        help="Filter test names by this expression",
    )
    args = parser.parse_args()

    # Turn the filter into a regular expression object, if present
    filter = None
    if args.filter != "":
        filter = re.compile(args.filter)

    # Autodetect CPU count
    if args.jobs <= 0:
        args.jobs = os.cpu_count()

    # Disable TF info messages
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    try:
        os.makedirs(args.output_dir)
    except FileExistsError:
        pass

    if args.random_shapes:
        gen_rand_shapes(args)

    # Build the dynamic ops
    createDynamicOpLists()

    # Generate the test list and arguments to run_unit_test()
    unit_test_args = []

    for op in TF_OP_LIST:
        generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)

    errors = 0
    for t in unit_test_args:
        if not run_unit_test(*t):
            errors = errors + 1

    if not args.quiet:
        print("\nAll tasks done - with {} errors".format(errors))

    return 1 if errors else 0


if __name__ == "__main__":
    exit(main())