#!/usr/bin/env python3
# Copyright (c) 2020-2022, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import traceback

import numpy as np

# Level | Level for Humans | Level Description
# -------|------------------|------------------------------------
#  0     | DEBUG            | [Default] Print all messages
#  1     | INFO             | Filter out INFO messages
#  2     | WARNING          | Filter out INFO & WARNING messages
#  3     | ERROR            | Filter out all messages
# Filter out TensorFlow debug messages except errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
import tensorflow as tf  # noqa: E402
from frameworks.write_test_json import write_test_json  # noqa: E402
from frameworks.arg_gen import ArgGen  # noqa: E402
from frameworks.tensor_gen import TGen  # noqa: E402
from frameworks.test_builder import TBuilder  # noqa: E402
from frameworks.test_gen_utils import (  # noqa: E402
    QuantType,
    get_tf_dtype,
    get_shape_str,
)  # noqa: E402
from tensorflow.lite.python.interpreter import OpResolverType  # noqa: E402

# All of the supported frameworks
ALL_FRAMEWORKS = ["tf", "tflite"]

# Lists of different data types
TYPE_F = [tf.float32]
TYPE_I = [tf.int32]
TYPE_FI = [tf.float32, tf.int32]
TYPE_B = [tf.bool]
TYPE_FIB = [tf.float32, tf.int32, tf.bool]
TYPE_H = [tf.float16]
TYPE_FH = [tf.float32, tf.float16]
TYPE_FHI = [tf.float32, tf.float16, tf.int32]
TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]

# The list of operator tests
# Each dictionary entry for an op is a dictionary with the following required members:
#   'operands':  tuple (number_of_placeholder_tensors, number_of_constant_tensors)
#   'build_fcn': tuple (Test builder function, Tensor generator function,
#                       Argument generator function)
#   'types':     list of TensorFlow types that should be tested for this op
#                OR
#                a dictionary of {'framework_name': [type_list] } for cases where only
#                a subset of the types should be tested in each framework.  This can
#                also be used to restrict an operator to a particular framework.
#
# And optional members:
#   'template': boolean (indicates that this is a templated op which gets further
#               processing in createDynamicOpLists)
#   'bias':     boolean indicating that there is a bias component to be generated
#   'qtypes':   list of QuantType quantized types to generate for this op
#   'rank':     tuple (lowest rank, highest rank). Dimension range of the input tensor.
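#
# For illustration only, a minimal hypothetical entry following this schema would be:
#   "example_op": {
#       "operands": (1, 0),
#       "build_fcn": (TBuilder.ExampleOp, TGen.tgBasic, ArgGen.agNone),
#       "types": TYPE_F,
#   }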

TF_OP_LIST = {
    "add": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "sub": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # QuantType.ALL_I16 fail in TFLite conversion
        },
    },
    "mul": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "exp": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "rcp": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "relu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "relu1": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": [],
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "relu6": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "leaky_relu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "gelu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            # Need compiler support for tf.Erf.
            # "tf": TYPE_F,
            "tflite": list(
                # Only float32, int8 and uint8 supported currently
                TYPE_F
                + [QuantType.ALL_U8, QuantType.ALL_I8]
            ),
        },
    },
    "concat": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
        "types": TYPE_FI,
    },
    "bitwise_and": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_or": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_not": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_xor": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "logical_and": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "logical_or": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "logical_not": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "reduce_any": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_B,
    },
    "reduce_all": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {"tf": TYPE_B},
    },
    "reduce_min": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "reduce_max": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "reduce_sum": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_F,
            # v2 converter doesn't recognize quantized reduce_sum
            # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
            "tflite": TYPE_F,
        },
    },
    "reduce_mean": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "reduce_product": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_F,
    },
    "min": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "max": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "pow": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
        # Technically, integer is supported, but only for positive exponents.
        # Needs a random argument generator.
        "types": TYPE_F,
    },
    "abs": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "ceil": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "floor": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "log": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "negate": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "rsqrt": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rsqrt, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "sigmoid": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "tanh": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "square": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "squared_difference": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_F,
    },
    "equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "greater_equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "greater": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "less": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "less_equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "conv2d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu6_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu_n1_to_1_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    # This test is converted as:
    # tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
    # TODO: any way to generate tfl.conv2d(){fused_activation_function="TANH"}?
408 "conv2d_tanh_TEMPLATE": {
409 "operands": (1, 2),
410 "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
411 "types": {
412 "tf": [tf.float32],
413 "tflite": [
414 tf.float32,
415 QuantType.CONV_U8_U8,
416 QuantType.CONV_I8_I8,
417 QuantType.CONV_I16_I8,
418 ],
419 },
420 "template": True,
421 },
422 "conv2d_bias_TEMPLATE": {
423 "operands": (1, 2),
424 "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
425 "types": {
426 "tf": [tf.float32],
427 "tflite": [
428 tf.float32,
429 QuantType.CONV_U8_U8,
430 QuantType.CONV_I8_I8,
431 QuantType.CONV_I16_I8,
432 ],
433 },
434 "bias": True,
435 "template": True,
436 },
TatWai Chongfd629052022-07-25 04:01:58 +0000437 "conv3d_TEMPLATE": {
438 "operands": (1, 1),
439 "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
440 "types": {
441 "tf": [tf.float32],
442 "tflite": [
443 tf.float32,
444 QuantType.CONV_U8_U8,
445 QuantType.CONV_I8_I8,
446 # Quantization to 16x8-bit not yet supported by tflite.
447 ],
448 },
449 "template": True,
450 "rank": (1, 5),
451 },
452 "conv3d_bias_TEMPLATE": {
453 "operands": (1, 2),
454 "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
455 "types": {
456 "tf": [tf.float32],
457 "tflite": [
458 tf.float32,
459 QuantType.CONV_U8_U8,
460 QuantType.CONV_I8_I8,
461 # Quantization to 16x8-bit not yet supported by tflite.
462 ],
463 },
464 "bias": True,
465 "template": True,
466 "rank": (1, 5),
467 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000468 "depthwise_conv2d_TEMPLATE": {
469 "operands": (1, 1),
470 "build_fcn": (
471 TBuilder.DepthwiseConv2d,
472 TGen.tgDepthwiseConv2d,
473 ArgGen.agDepthwiseConv2d,
474 ),
475 "types": {
476 "tf": [tf.float32],
477 "tflite": [
478 tf.float32,
479 QuantType.CONV_U8_U8,
480 QuantType.CONV_I8_I8,
481 QuantType.CONV_I16_I8,
482 ],
483 },
484 "template": True,
485 },
486 "depthwise_conv2d_bias_TEMPLATE": {
487 "operands": (1, 2),
488 "build_fcn": (
489 TBuilder.DepthwiseConv2dWithBias,
490 TGen.tgDepthwiseConv2d,
491 ArgGen.agDepthwiseConv2d,
492 ),
493 "types": {
494 "tf": [tf.float32],
495 "tflite": [
496 tf.float32,
497 QuantType.CONV_U8_U8,
498 QuantType.CONV_I8_I8,
499 QuantType.CONV_I16_I8,
500 ],
501 },
502 "bias": True,
503 "template": True,
504 },
505 "transpose_conv2d_TEMPLATE": {
506 "operands": (1, 1),
507 "build_fcn": (
508 TBuilder.TransposeConv2d,
509 TGen.tgTransposeConv2d,
510 ArgGen.agTransposeConv2d,
511 ),
512 "types": {
513 "tf": [tf.float32],
514 "tflite": [
515 tf.float32,
516 QuantType.CONV_U8_U8,
517 QuantType.CONV_I8_I8,
518 QuantType.CONV_I16_I8,
519 ],
520 },
521 "template": True,
522 },
523 "argmax": {
524 "operands": (1, 0),
525 "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
526 "types": {"tf": TYPE_F},
527 },
528 "avg_pool2d": {
529 "operands": (1, 0),
530 "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
531 "types": {
532 "tf": TYPE_F,
533 "tflite": list(
534 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
535 ),
536 },
537 },
538 "max_pool2d": {
539 "operands": (1, 0),
540 "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
541 "types": {
542 "tf": TYPE_F,
543 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
544 # ALL_I16 not supported yet
545 # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
546 # QI16 is missing from MaxPoolOperandAndResultConstraints
547 # If adding QI16 back this test can run through.
548 },
549 },
550 "reshape": {
551 "operands": (1, 0),
552 "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
553 "types": TYPE_FI,
554 },
555 "transpose": {
556 "operands": (1, 0),
557 "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
558 "types": TYPE_FI,
559 },
560 "slice": {
561 "operands": (1, 0),
562 "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
563 "types": TYPE_FI,
564 },
565 "strided_slice": {
566 "operands": (1, 0),
567 "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
568 "types": TYPE_FI,
569 },
570 "select": {
571 "operands": (3, 0),
572 "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
573 "types": TYPE_FI,
574 },
575 "addn": {
576 "operands": (4, 0),
577 "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
578 "types": TYPE_FI,
579 },
580 "concatv2": {
581 "operands": (4, 0),
582 "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
583 "types": TYPE_FI,
584 },
585 "stack": {
586 "operands": (4, 0),
587 "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
588 "types": TYPE_FI,
589 },
590 "unstack": {
591 "operands": (1, 0),
592 "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
593 "types": TYPE_F,
594 },
TatWai Chongf7008da2022-09-09 09:35:40 +0000595 "mirrorpad": {
596 "operands": (1, 0),
597 "build_fcn": (TBuilder.MirrorPad, TGen.tgBasic, ArgGen.agMirrorPad),
598 "types": TYPE_FI,
599 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000600 "pad": {
601 "operands": (1, 0),
602 "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
603 "types": TYPE_F,
604 },
605 "expand_dims": {
606 "operands": (1, 0),
607 "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
608 "types": TYPE_FI,
609 },
610 "shape": {
611 "operands": (1, 0),
612 "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
613 "types": TYPE_FI,
614 },
615 "rank": {
616 "operands": (1, 0),
617 "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
618 "types": TYPE_FI,
619 },
620 "fill": {
621 "operands": (1, 0),
622 "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
623 "types": TYPE_FI,
624 },
625 "elu": {
626 "operands": (1, 0),
627 "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
628 "types": TYPE_F,
629 },
630 "softmax": {
631 "operands": (1, 0),
632 "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
633 "types": {
634 "tf": TYPE_F,
635 "tflite": list(
636 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
637 ),
638 },
639 },
640 "log_softmax": {
641 "operands": (1, 0),
642 "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
643 "types": TYPE_F,
644 },
645 "matmul": {
646 "operands": (2, 0),
647 "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
648 "types": {
649 "tf": TYPE_F,
650 "tflite": list(
651 TYPE_F
652 + [QuantType.ALL_U8, QuantType.ALL_I8]
653 # 16 bits matmul fail to convert
654 ),
655 },
656 },
657 "add_scalar": {
658 "operands": (1, 0),
659 "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
660 "types": TYPE_F,
661 },
662 "add_1d": {
663 "operands": (2, 0),
664 "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
665 "types": TYPE_F,
666 },
667 "split": {
668 "operands": (1, 0),
669 "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
670 "types": TYPE_FI,
671 },
672 "tile": {
673 "operands": (1, 0),
674 "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
675 "types": TYPE_FI,
676 },
677 "reverse": {
678 "operands": (1, 0),
679 "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
680 "types": {"tf": TYPE_FI},
681 },
682 "gather": {
683 "operands": (1, 0),
684 "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
685 "types": TYPE_FI,
686 },
687 "gather_nd": {
688 "operands": (1, 0),
689 "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
690 "types": TYPE_FI,
691 },
692 "scatter_nd": {
693 "operands": (1, 0),
694 "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
695 "types": TYPE_FI,
696 },
697 "space_to_batch": {
698 "operands": (1, 0),
699 "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
700 "types": TYPE_F,
701 },
702 "batch_to_space": {
703 "operands": (1, 0),
704 "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
705 "types": TYPE_F,
706 },
707 "space_to_depth": {
708 "operands": (1, 0),
709 "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
710 "types": TYPE_F,
711 },
712 "depth_to_space": {
713 "operands": (1, 0),
714 "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
715 "types": TYPE_F,
716 },
717 "one_hot": {
718 "operands": (3, 1),
719 "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
720 "types": TYPE_FI,
721 },
722 "fakequant": {
723 "operands": (1, 0),
724 "build_fcn": (
725 TBuilder.Fakequant,
726 TGen.tgBasic,
727 ArgGen.agFakequant,
728 ),
729 "types": {"tf": TYPE_F},
730 },
731 "resize_nearest": {
732 "operands": (1, 0),
733 "build_fcn": (TBuilder.ResizeNearest, TGen.tgPooling, ArgGen.agNone),
734 "types": {
735 "tf": TYPE_F,
736 "tflite": list(
737 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
738 ),
739 },
740 },
741 "resize_bilinear": {
742 "operands": (1, 0),
743 "build_fcn": (TBuilder.ResizeBilinear, TGen.tgPooling, ArgGen.agNone),
744 "types": {
745 "tf": TYPE_F,
746 "tflite": list(
747 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
748 ),
749 },
750 },
TatWai Chongf7326092022-06-08 12:17:14 -0700751 "resize_bilinear_v1_align_corners": {
752 "operands": (1, 0),
753 "build_fcn": (
754 TBuilder.ResizeBilinearV1AlignCorners,
755 TGen.tgPooling,
756 ArgGen.agNone,
757 ),
758 "types": {
759 "tf": TYPE_F,
760 "tflite": list(
761 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
762 ),
763 },
764 },
765 "resize_bilinear_v1_none": {
766 "operands": (1, 0),
767 "build_fcn": (TBuilder.ResizeBilinearV1None, TGen.tgPooling, ArgGen.agNone),
768 "types": {
769 "tf": TYPE_F,
770 "tflite": list(
771 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
772 ),
773 },
774 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000775 "left_shift": {
776 "operands": (1, 0),
777 "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
778 "types": {"tf": [tf.int32]},
779 },
780 "right_shift": {
781 "operands": (1, 0),
782 "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
783 "types": {
784 "tf": [
785 tf.int32,
786 ]
787 },
788 },
789}
790
791# Shapes to be tested; default can be overwritten
792shape_list = [
793 (1,),
794 (64,),
795 (14, 19),
796 (13, 21, 3),
797 (1, 4, 4, 4),
798 (1, 8, 4, 17),
799 (1, 4, 8, 19),
800 (1, 32, 32, 8),
801 (1, 7, 7, 9),
TatWai Chongfd629052022-07-25 04:01:58 +0000802 (2, 2, 7, 7, 2),
803 (1, 4, 8, 21, 17),
804 (3, 32, 16, 16, 5),
Jeremy Johnson015c3552022-02-23 12:15:03 +0000805]
806
807
808def gen_rand_shapes(args):
809 """Overwrite the global shape list with a new list of random shapes"""
810 global shape_list
811
812 rng = np.random.default_rng(args.random_seed)
813
814 # Don't let things get too big... cap the maximum volume, but let
815 # an individual dimension be 1..47
816 max_total_volume = 32 * 32 * 4
817
818 shape_list = []
TatWai Chongfd629052022-07-25 04:01:58 +0000819 # Only iterate over ranks 2, 3, 4, and 5
820 for rank in range(2, 6):
Jeremy Johnson015c3552022-02-23 12:15:03 +0000821 for n in range(args.random_shapes):
822 new_shape = rng.integers(1, 48, size=rank)
823
TatWai Chongfd629052022-07-25 04:01:58 +0000824 # Set the batch dimension on 4D or 5D objects to 1
825 if rank == 4 or rank == 5:
Jeremy Johnson015c3552022-02-23 12:15:03 +0000826 new_shape[0] = 1
827
828 # Limit the total shape volume and throw out any
829 # shapes that wouldn't leave at least size=2 in some non-batch dimension
830 volume = 1
831 skip_shape = False
832 for i in range(rank):
833
834 volume *= new_shape[i]
835
836 # Reduce the shape, while it's larger than the maximum volume
837 while volume > max_total_volume:
838 new_shape[i] = new_shape[i] // 2
839 volume = volume // 2
840
841 # Now an untenable dimension size? Skip this one.
842 if new_shape[i] < 1:
843 skip_shape = True
844
845 if not skip_shape:
846 shape_list.append(tuple(new_shape))


# Construct, run and save a whole TensorFlow tf.function to a protobuf file,
# or convert it to .tflite if it's a quantized unit test
def run_unit_test(
    op_name,
    args,
    test_dir,
    curr_shape,
    addl_args,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
):

    try:
        op = TF_OP_LIST[op_name]
        op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

        # Get and seed a random number generator for this test
        rng = np.random.default_rng(seed)

        # return placeholders=(str: name, np.array: value)
        #        consts=(str: name, np.array: value)
        placeholders, consts = tensor_gen_fcn(op, curr_shape, dtype, rng)

        # if the test doesn't have any placeholders/consts, terminate early
        if len(placeholders) == 0 and len(consts) == 0:
            return True

        if not args.quiet:
            print(" {} ".format(test_dir))

        try:
            os.mkdir(test_dir)
        except FileExistsError:
            pass

        const_nodes = [value for name, value in consts]

        num_placeholders = len(placeholders)
        # if test is quantized, create tensor quantization metadata info for
        # each input tensor, based on different quantized type
        if quantized_inference_dtype:
            is_quantized = True
            # TODO: support INT8 IFM x INT4 weight later
            if quantized_inference_dtype == QuantType.ALL_U8:
                qzero = [128] * num_placeholders
                numpy_dtype = [np.uint8] * num_placeholders
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.ALL_I8:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int8] * num_placeholders
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.ALL_I16:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int16] * num_placeholders
                tflite_inference_dtype = tf.int16
            elif quantized_inference_dtype == QuantType.CONV_U8_U8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [128] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.uint8, np.uint8]
                else:
                    numpy_dtype = [np.uint8, np.uint8, np.int32]
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.CONV_I8_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [0] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.int8, np.int8]
                else:
                    numpy_dtype = [np.int8, np.int8, np.int32]
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.CONV_I16_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                if num_placeholders == 2:
                    qzero = [0, 0]
                    numpy_dtype = [np.int16, np.int8]
                else:
                    qzero = [0, 0, 0]
                    numpy_dtype = [
                        np.int16,
                        np.int8,
                        np.int64,
                    ]  # np.int64 to represent 40 bits accumulator
                tflite_inference_dtype = tf.int16
            else:
                raise Exception(
                    "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
                )

        else:
            is_quantized = False

        tf_model_filename = None
        tf_result_npy_filename = None
        tf_result_name = None

        tflite_model_filename = None
        tflite_result_npy_filename = None
        tflite_result_name = None

        placeholder_names = []
        placeholder_vals = []
        placeholder_signatures = ()
        placeholder_npy_filenames = []
        placeholder_shapes = []

        for idx, (name, val) in enumerate(placeholders):
            placeholder_names.append(name)
            placeholder_signatures = placeholder_signatures + (
                tf.TensorSpec(shape=val.shape, dtype=val.dtype, name=name),
            )
            placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
            placeholder_shapes.append(val.shape)

        # Get test builder class
        fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
        concrete_function = tf.function(input_signature=placeholder_signatures)(
            fcn_node.eval
        ).get_concrete_function()

        if is_quantized:

            assert dtype is tf.float32, "quantized test must come from float32 graph"

            # 1. Quantize float placeholder npy to quantized to feed the graph
            for idx, (name, val) in enumerate(placeholders):

                # we use np.amin()/np.amax() to determine dynamic range
                # for quantized test
                zeropoint = 0
                scale = 1.0
                if numpy_dtype[idx] != np.int64:
                    qmin = np.iinfo(numpy_dtype[idx]).min
                    qmax = np.iinfo(numpy_dtype[idx]).max
                    num_bits = np.iinfo(numpy_dtype[idx]).bits
                # 40 bit is represented as np.int64
                else:
                    num_bits = 40
                    qmin = -(1 << num_bits)
                    qmax = (1 << num_bits) - 1

                min_val = np.amin(val)
                max_val = np.amax(val)

                # for a single-value tensor, we set scale equal to abs(value),
                # and fix zeropoint to 128
                # if val > 0, it'll be represented as 129,
                #     where val = (129 - 128) * val
                # if val < 0, it'll be represented as 127,
                #     where val = (127 - 128) * (-val)
                # if val == 0, it'll be represented as 128, with range [-128.0, 128.0]
                # and let quantized 1 represent the value
                # also adjust the effective min/max accordingly
                if max_val == min_val:
                    if max_val != 0:
                        scale = abs(max_val)
                    else:
                        scale = 1.0
                    min_val = float(qmin - qzero[idx]) * scale
                    max_val = float(qmax - qzero[idx]) * scale
                else:
                    scale = (max_val - min_val) / float(qmax - qmin)
                    zeropoint = int(round((-min_val) / scale)) + qmin
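                    # Illustrative example (assuming int8): values in [-3.0, 5.0]
                    # give scale = 8.0 / 255 ~= 0.0314 and
                    # zeropoint = round(3.0 / 0.0314) + (-128) = -32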

                # run through tf.fakequant first so the quantization error is aligned
                fakequant_val = tf.quantization.fake_quant_with_min_max_args(
                    val,
                    min=min_val,
                    max=max_val,
                    num_bits=num_bits,
                    name="gen_quant_npy",
                )

                quant_val = np.round(fakequant_val / scale).astype(np.int32) + zeropoint

                # in a few unit tests after the May 2020 TF hash, this quantized
                # value for some reason exceeds the [0, 255] range
                saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])

                # save all quantized tensors as np.int32,
                # since the TOSA numpy C++ API only supports int32
                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]),
                    saved_val.astype(np.int32),
                    False,
                )

                placeholder_vals.append(tf.convert_to_tensor(saved_val))

            # 2. Convert the model to quantized TFLite flatbuffer
            module = tf.Module()
            converter = tf.lite.TFLiteConverter.from_concrete_functions(
                [concrete_function], module
            )
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.experimental_new_converter = True

            # use MLIR-based post-quantizer
            converter.experimental_new_quantizer = True

            flag = (
                tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8  # noqa: E501
            )
            if tflite_inference_dtype == tf.int16:
                converter.target_spec.supported_ops = [flag]

            def input_stats():
                for i in range(0, args.num_samples):
                    a = [
                        TGen.getRand(shape, tf.float32, rng)
                        for shape in placeholder_shapes
                    ]
                    yield a

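            # The TFLite converter draws calibration samples from input_stats()
            # to estimate tensor ranges for post-training quantization.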
            converter.representative_dataset = input_stats
            converter.inference_input_type = tflite_inference_dtype
            converter.inference_output_type = tflite_inference_dtype

            tflite_model = converter.convert()

            tflite_model_filename = "model.tflite"

            # Write out converted model to disk
            with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                f.write(tflite_model)

        else:  # is_quantized is False

            # 1. Save out the numpy arrays directly
            for idx, (name, val) in enumerate(placeholders):
                placeholder_vals.append(tf.convert_to_tensor(val))
                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
                )

        # 2.a Save out .pb if the framework list includes tensorflow
1100 if "tf" not in excluded_framework_list:
1101 # Write out graph as protobuf to disk
1102 tf_model_filename = "model.pb"
1103 tf.io.write_graph(
1104 concrete_function.graph, test_dir, tf_model_filename, True
1105 )
1106
        # 2.b Save out .tflite if the framework list includes tflite
1108 if "tflite" not in excluded_framework_list:
1109 # Convert the model to TFLite flatbuffer
1110 module = tf.Module()
1111 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1112 [concrete_function], module
1113 )
1114
1115 converter.experimental_new_converter = True
1116
            # Even for a non-quantized int32 test, this needs to be set to tf.float32
            converter.inference_input_type = tf.float32
            converter.inference_output_type = tf.float32
            tflite_model = converter.convert()

            # Write out converted model to disk
            tflite_model_filename = "model.tflite"
            with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                f.write(tflite_model)

        # Get TF reference result if .pb is specified
        if tf_model_filename:
            tf_result_npy_filename = "tf_result.npy"
            tf_result = concrete_function(*placeholder_vals)
            np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)

            tf_result_name = result_name

        # Get TFLite inference result if .tflite is specified
        if tflite_model_filename:
            tflite_result_npy_filename = "tflite_result.npy"

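            # These ops only have optimized kernels in the TFLite interpreter,
            # so the default (optimized) resolver is used for them regardless
            # of --tflite-kernel-mode.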
            ops_with_optimized_only_kernel = ["elu", "ceil", "gather"]

            if args.tflite_kernel_mode == "optimized" or (
                op_name in ops_with_optimized_only_kernel
            ):
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename)
                )
            elif args.tflite_kernel_mode == "reference":
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename),
                    experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
                )
            else:
                assert 0, "unknown tflite interpreter mode {}".format(
                    args.tflite_kernel_mode
                )
            interpreter.allocate_tensors()

            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()

            assert len(input_details) == len(
                placeholder_vals
            ), "number of placeholder mismatch"

            for idx, val in enumerate(placeholder_vals):
                interpreter.set_tensor(input_details[idx]["index"], val.numpy())

            interpreter.invoke()
            tflite_result = interpreter.get_tensor(output_details[0]["index"])

            np.save(
                os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
            )

            # The result tensor name changes after converting to a TFLite
            # flatbuffer, so overwrite the information from the TFLite model
            # directly. Assume a single result tensor for now.
            tflite_result_name = output_details[0]["name"]

        # Write out test descriptor
        write_test_json(
            filename=os.path.join(test_dir, "test.json"),
            tf_model_filename=tf_model_filename,
            tf_result_npy_filename=tf_result_npy_filename,
            tf_result_name=tf_result_name,
            tflite_model_filename=tflite_model_filename,
            tflite_result_npy_filename=tflite_result_npy_filename,
            tflite_result_name=tflite_result_name,
            ifm_name=placeholder_names,
            ifm_file=placeholder_npy_filenames,
            ifm_shape=placeholder_shapes,
            framework_exclusions=excluded_framework_list,
            quantized=is_quantized,
        )
    except Exception as e:
        msg = "Error running task: {}".format(e)
        print(msg)
        print(
            "".join(
                traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
            )
        )
        return False
    return True


def build_const_net(
    args,
    curr_shape,
    op_name,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
    rng,
    filter,
    unit_test_args,
):

    if quantized_inference_dtype:
        quant_dtype = get_tf_dtype(quantized_inference_dtype)
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
    else:
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
    test_dir = os.path.join(args.output_dir, test_dir)

    # If the operator has an additional function to generate arguments, call it
    # here and iterate through the argument list that it generates
    op = TF_OP_LIST[op_name]
    op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

    try:
        rank_lo, rank_hi = op["rank"]
    except KeyError:
        # Default the testing rank range to (1, 4).
        rank_lo = 1
        rank_hi = 4

    if len(curr_shape) not in range(rank_lo, rank_hi + 1):
        return

    addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
    for desc, addl_args in addl_args_tuple:
        # Only filter on the full test_name, not the output directory
        _, test_name = os.path.split(test_dir + desc)
        if not filter or filter.search(test_name):
            unit_test_args.append(
                [
                    op_name,
                    args,
                    test_dir + desc,
                    curr_shape,
                    addl_args,
                    dtype,
                    excluded_framework_list,
                    quantized_inference_dtype,
                    result_name,
                    seed,
                ]
            )


# Python's hash() is not reproducible across runs, so create a hash for our purpose
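# (a simple CRC-like shift/xor mix, so each op name maps to a stable seed
# offset across Python processes)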
def op_name_hash(op_name):
    result = 0xDEADBEEF
    for ch in op_name:
        if result & 1:
            result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
        else:
            result = (ord(ch) << 24) ^ (result >> 1)

    return result


def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):

    if not args.quiet:
        print(
            "Generating tests for {} ".format(
                op_name
            )
        )

    op = TF_OP_LIST[op_name]

    # Seed the RNG so that we get the same random tests for each test each time
    # If the number of tests for a given generation function changes, the tests
    # for that operator may also change accordingly, but this will at least keep
    # down churn across operators.

    bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
        np.int32
    ).max
    rng = np.random.default_rng(bounded_hash_val)

    # op["types"] is either a list, or a dictionary with 'tf' and 'tflite' as keys
    # and the data types we want to test under each framework as values

    if isinstance(op["types"], dict):
        try:
            tf_dtypes = op["types"]["tf"]
        except KeyError:
            tf_dtypes = []
        try:
            tflite_dtypes = op["types"]["tflite"]
        except KeyError:
            tflite_dtypes = []
    elif isinstance(op["types"], list):
        tf_dtypes = op["types"]
        tflite_dtypes = op["types"]

    tf_nonquantized_dtypes = tf_dtypes  # tf doesn't support quantized data types
    tflite_quantized_dtypes = []
    tflite_nonquantized_dtypes = []
    for dtype in tflite_dtypes:
        if isinstance(dtype, QuantType):
            tflite_quantized_dtypes.append(dtype)
        else:
            tflite_nonquantized_dtypes.append(dtype)

    nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
        set(tflite_nonquantized_dtypes)
    )
    nonquantized_dtypes = list(nonquantized_dtypes_set)
    quantized_dtypes = tflite_quantized_dtypes

    # populate non quantized unit test arguments
    for dtype in nonquantized_dtypes:

        excluded_framework_set = set(ALL_FRAMEWORKS)
        if dtype in tf_nonquantized_dtypes:
            excluded_framework_set.remove("tf")
        if dtype in tflite_nonquantized_dtypes:
            excluded_framework_set.remove("tflite")
        excluded_framework_list = list(excluded_framework_set)

        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                dtype,
                excluded_framework_list,
                None,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    # populate quantized unit test arguments
    # must exclude 'tf' and source dtype being tf.float32
    for dtype in quantized_dtypes:
        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                tf.float32,
                ["tf"],
                dtype,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    return unit_test_args


def createDynamicOpLists():
    """The templated operators are conv2d-style operators with a number of kernel
    sizes. Since the operator is unchanged, we generate the range of kernel
    sizes here in this loop and remove the original templates from the list.

    This could be expanded to non-conv2d-style operators in the future."""

    # Dynamically create op lists for convolutions with a list of kernel sizes
    KERNELS = [
        [1, 1],
        [3, 3],
        [5, 5],
    ]

    # dim = [D, H, W]
    KERNELS_3D = [
        [1, 1, 1],
        [2, 3, 3],
        [3, 5, 5],
    ]

    TEMPLATE_LIST = [
        "conv2d",
        "conv2d_bias",
        "conv2d_relu",
        "conv2d_relu6",
        "conv2d_relu_n1_to_1",
        "conv2d_tanh",
        "depthwise_conv2d",
        "depthwise_conv2d_bias",
        "transpose_conv2d",
    ]

    TEMPLATE_LIST_CONV3D = [
        "conv3d",
        "conv3d_bias",
    ]

    for t in TEMPLATE_LIST:
        for k in KERNELS:
            testName = "{}_{}x{}".format(t, k[0], k[1])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # The existing 2D operators don't support kernel dimensions higher than 2.
    for t in TEMPLATE_LIST_CONV3D:
        for k in KERNELS_3D:
            testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # Delete any templates after having created any dynamic ops
    # This is a two-pass operation because it's bad practice to delete
    # keys from dictionaries while iterating
    keyList = []
    for k in TF_OP_LIST:
        try:
            if TF_OP_LIST[k]["template"]:
                keyList.append(k)
                continue
        except KeyError:
            pass

    for k in keyList:
        del TF_OP_LIST[k]


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--seed", dest="random_seed", default=42, type=int, help="Random seed"
    )
    parser.add_argument(
        "--random-shapes",
        dest="random_shapes",
        default=0,
        type=int,
        help=(
1454 "Use N random shapes of each rank for generating tests,"
1455 "seeded with random seed"
        ),
    )
    parser.add_argument(
        "-o",
        "--output-dir",
        dest="output_dir",
        default=".",
        type=str,
        help="Test output directory path prefix",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        dest="quiet",
        default=False,
        action="store_true",
        help="Do not print test names",
    )
    parser.add_argument(
        "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
    )
    parser.add_argument(
        "-m",
        "--tflite-kernel-mode",
        dest="tflite_kernel_mode",
        type=str,
        choices=["reference", "optimized"],
        default="reference",
        help="TFLite interpreter kernel mode",
    )
    parser.add_argument(
        "--num-samples",
        dest="num_samples",
        default=200,
        type=int,
        help="Number of input samples for post-training quantization",
    )
    parser.add_argument(
        "--filter",
        dest="filter",
        default="",
        type=str,
        help="Filter test names by this expression",
    )
    args = parser.parse_args()

    # Turn the filter into a re object if present
    filter = None
    if args.filter != "":
        filter = re.compile(args.filter)

    # Autodetect CPU count
    if args.jobs <= 0:
        args.jobs = os.cpu_count()

    # Disable TF info messages
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    try:
        os.makedirs(args.output_dir)
    except FileExistsError:
        pass

    if args.random_shapes:
        gen_rand_shapes(args)

    # Build dynamic ops
    createDynamicOpLists()

    # Generate the test list and arguments to run_unit_test()
    unit_test_args = []

    for op in TF_OP_LIST:
        generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)

    errors = 0
    for t in unit_test_args:
        if not run_unit_test(*t):
            errors = errors + 1

    if not args.quiet:
        print("\nAll tasks done - with {} errors".format(errors))

    return 1 if errors else 0


if __name__ == "__main__":
    exit(main())