#!/usr/bin/env python3
# Copyright (c) 2020-2022, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import traceback

import numpy as np

# Level | Level for Humans | Level Description
# -------|------------------|------------------------------------
#  0     | DEBUG            | [Default] Print all messages
#  1     | INFO             | Filter out INFO messages
#  2     | WARNING          | Filter out INFO & WARNING messages
#  3     | ERROR            | Filter out all messages
# Filter out TensorFlow debug messages except errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
import tensorflow as tf  # noqa: E402
from frameworks.write_test_json import write_test_json  # noqa: E402
from frameworks.arg_gen import ArgGen  # noqa: E402
from frameworks.tensor_gen import TGen  # noqa: E402
from frameworks.test_builder import TBuilder  # noqa: E402
from frameworks.test_gen_utils import (  # noqa: E402
    QuantType,
    get_tf_dtype,
    get_shape_str,
)  # noqa: E402
from tensorflow.lite.python.interpreter import OpResolverType  # noqa: E402

33# All of the supported frameworks
34ALL_FRAMEWORKS = ["tf", "tflite"]
35
36# Lists of different data types
37TYPE_F = [tf.float32]
38TYPE_I = [tf.int32]
39TYPE_FI = [tf.float32, tf.int32]
40TYPE_B = [tf.bool]
41TYPE_FIB = [tf.float32, tf.int32, tf.bool]
42TYPE_H = [tf.float16]
43TYPE_FH = [tf.float32, tf.float16]
44TYPE_FHI = [tf.float32, tf.float16, tf.int32]
45TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]
46
# The list of operator tests
# Each dictionary entry for an op is a dictionary with the following required members:
#   'operands': tuple (number_of_placeholder_tensors, number_of_constant_tensors)
#   'build_fcn': tuple (Test builder function, Tensor generator function,
#                       Argument generator function)
#   'types': list of TensorFlow types that should be tested for this op
#            OR
#            a dictionary of {'framework_name': [type_list]} for cases where only
#            a subset of the types should be tested in each framework.  This can also
#            be used to restrict an operator to a particular framework.
#
# And optional members:
#   'template': boolean (indicates that this is a templated op which gets further
#               processing in createDynamicOpLists)
#   'bias':     boolean indicating that there is a bias component to be generated
#   'qtypes':   list of QuantType quantized types to generate for this op
#   'rank':     tuple (lowest rank, highest rank). Dimension range of input tensor.

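# For illustration only, a minimal hypothetical entry (the op name and the
# builder/generator functions below are made up) would look like:
#   "my_op": {
#       "operands": (1, 0),
#       "build_fcn": (TBuilder.MyOp, TGen.tgBasic, ArgGen.agNone),
#       "types": TYPE_F,
#   },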
TF_OP_LIST = {
    "add": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "sub": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # QuantType.ALL_I16 fails in TFLite conversion
        },
    },
    "mul": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "exp": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "rcp": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "relu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "relu1": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": [],
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "relu6": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "leaky_relu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "prelu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Prelu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "gelu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            # Need compiler support for tf.Erf.
            # "tf": TYPE_F,
            "tflite": list(
                # Only float32, int8 and uint8 supported currently
                TYPE_F
                + [QuantType.ALL_U8, QuantType.ALL_I8]
            ),
        },
    },
    "concat": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
        "types": TYPE_FI,
    },
    "bitwise_and": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_or": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_not": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_xor": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "logical_and": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "logical_or": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "logical_not": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "reduce_any": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_B,
    },
    "reduce_all": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {"tf": TYPE_B},
    },
    "reduce_min": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "reduce_max": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "reduce_sum": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_F,
            # v2 converter doesn't recognize quantized reduce_sum
            # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
            "tflite": TYPE_F,
        },
    },
    "reduce_mean": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "reduce_product": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_F,
    },
    "min": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "max": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "pow": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
        # Technically, integer is supported, but only for positive exponents.
        # Needs a random argument generator.
        "types": TYPE_F,
    },
    "abs": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "ceil": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "floor": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "log": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "negate": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "rsqrt": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rsqrt, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "sigmoid": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "tanh": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "square": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "squared_difference": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_F,
    },
    "equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "greater_equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "greater": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "less": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "less_equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "conv2d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu6_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu_n1_to_1_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    # This test is converted as:
    # tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
    # TODO: any way to generate tfl.conv2d(){fused_activation_function="TANH"}?
    "conv2d_tanh_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_bias_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "bias": True,
        "template": True,
    },
    "conv3d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                # Quantization to 16x8-bit not yet supported by tflite.
            ],
        },
        "template": True,
        "rank": (1, 5),
    },
    "conv3d_bias_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                # Quantization to 16x8-bit not yet supported by tflite.
            ],
        },
        "bias": True,
        "template": True,
        "rank": (1, 5),
    },
    "depthwise_conv2d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (
            TBuilder.DepthwiseConv2d,
            TGen.tgDepthwiseConv2d,
            ArgGen.agDepthwiseConv2d,
        ),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "depthwise_conv2d_bias_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (
            TBuilder.DepthwiseConv2dWithBias,
            TGen.tgDepthwiseConv2d,
            ArgGen.agDepthwiseConv2d,
        ),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "bias": True,
        "template": True,
    },
    "transpose_conv2d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (
            TBuilder.TransposeConv2d,
            TGen.tgTransposeConv2d,
            ArgGen.agTransposeConv2d,
        ),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "argmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
        "types": {"tf": TYPE_F},
    },
    "avg_pool2d": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "max_pool2d": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
        "types": {
            "tf": TYPE_F,
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # ALL_I16 not supported yet
            # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
            # QI16 is missing from MaxPoolOperandAndResultConstraints
            # If QI16 is added back, this test can run through.
        },
    },
    "reshape": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
        "types": TYPE_FI,
    },
    "transpose": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
        "types": TYPE_FI,
    },
    "slice": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
        "types": TYPE_FI,
    },
    "strided_slice": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
        "types": TYPE_FI,
    },
    "select": {
        "operands": (3, 0),
        "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "addn": {
        "operands": (4, 0),
        "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "concatv2": {
        "operands": (4, 0),
        "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
        "types": TYPE_FI,
    },
    "stack": {
        "operands": (4, 0),
        "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
        "types": TYPE_FI,
    },
    "unstack": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
        "types": TYPE_F,
    },
    "mirrorpad": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.MirrorPad, TGen.tgBasic, ArgGen.agMirrorPad),
        "types": TYPE_FI,
    },
    "pad": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
        "types": TYPE_F,
    },
    "expand_dims": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
        "types": TYPE_FI,
    },
    "shape": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "rank": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "fill": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
        "types": TYPE_FI,
    },
    "elu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "softmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "log_softmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "matmul": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F
                + [QuantType.ALL_U8, QuantType.ALL_I8]
                # 16-bit matmul fails to convert
            ),
        },
    },
    "add_scalar": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "add_1d": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "split": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
        "types": TYPE_FI,
    },
    "tile": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
        "types": TYPE_FI,
    },
    "reverse": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
        "types": {"tf": TYPE_FI},
    },
    "gather": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
        "types": TYPE_FI,
    },
    "gather_nd": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
        "types": TYPE_FI,
    },
    "scatter_nd": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
        "types": TYPE_FI,
    },
    "space_to_batch": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
        "types": TYPE_F,
    },
    "batch_to_space": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
        "types": TYPE_F,
    },
    "space_to_depth": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
        "types": TYPE_F,
    },
    "depth_to_space": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
        "types": TYPE_F,
    },
    "one_hot": {
        "operands": (3, 1),
        "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
        "types": TYPE_FI,
    },
    "fakequant": {
        "operands": (1, 0),
        "build_fcn": (
            TBuilder.Fakequant,
            TGen.tgBasic,
            ArgGen.agFakequant,
        ),
        "types": {"tf": TYPE_F},
    },
    "resize_nearest": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ResizeNearest, TGen.tgPooling, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "resize_bilinear": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ResizeBilinear, TGen.tgPooling, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "resize_bilinear_v1_align_corners": {
        "operands": (1, 0),
        "build_fcn": (
            TBuilder.ResizeBilinearV1AlignCorners,
            TGen.tgPooling,
            ArgGen.agNone,
        ),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "resize_bilinear_v1_none": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ResizeBilinearV1None, TGen.tgPooling, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "left_shift": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
        "types": {"tf": [tf.int32]},
    },
    "right_shift": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
        "types": {
            "tf": [
                tf.int32,
            ]
        },
    },
}

# Shapes to be tested; default can be overwritten
shape_list = [
    (1,),
    (64,),
    (14, 19),
    (13, 21, 3),
    (1, 4, 4, 4),
    (1, 8, 4, 17),
    (1, 4, 8, 19),
    (1, 32, 32, 8),
    (1, 7, 7, 9),
    (2, 2, 7, 7, 2),
    (1, 4, 8, 21, 17),
    (3, 32, 16, 16, 5),
]
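
# Note: shapes whose rank falls outside an operator's supported 'rank' range
# (which defaults to (1, 4) in build_const_net) are skipped for that operator,
# so the 5D shapes above only exercise ops such as conv3d that declare a wider
# rank range.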


def gen_rand_shapes(args):
    """Overwrite the global shape list with a new list of random shapes"""
    global shape_list

    rng = np.random.default_rng(args.random_seed)

    # Don't let things get too big... cap the maximum volume, but let
    # an individual dimension be 1..47
    max_total_volume = 32 * 32 * 4

    shape_list = []
    # Only iterate over ranks 2, 3, 4, and 5
    for rank in range(2, 6):
        for n in range(args.random_shapes):
            new_shape = rng.integers(1, 48, size=rank)

            # Set the batch dimension on 4D or 5D objects to 1
            if rank == 4 or rank == 5:
                new_shape[0] = 1

            # Limit the total shape volume and throw out any
            # shapes that wouldn't leave at least size=2 in some non-batch dimension
            volume = 1
            skip_shape = False
            for i in range(rank):

                volume *= new_shape[i]

                # Reduce the shape, while it's larger than the maximum volume
                while volume > max_total_volume:
                    new_shape[i] = new_shape[i] // 2
                    volume = volume // 2

                # Now an untenable dimension size? Skip this one.
                if new_shape[i] < 1:
                    skip_shape = True

            if not skip_shape:
                shape_list.append(tuple(new_shape))


# Construct, run and save a whole TensorFlow tf.function to a protobuf file,
# or convert it to .tflite if it's a quantized unit test
def run_unit_test(
    op_name,
    args,
    test_dir,
    curr_shape,
    addl_args,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
):

    try:
        op = TF_OP_LIST[op_name]
        op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

        # Get and seed a random number generator for this test
        rng = np.random.default_rng(seed)

        # returns placeholders=(str: name, np.array: value)
        #         consts=(str: name, np.array: value)
        placeholders, consts = tensor_gen_fcn(op, curr_shape, dtype, rng)

        # if the test doesn't have any placeholders/consts, terminate
        if len(placeholders) == 0 and len(consts) == 0:
            return True

        if not args.quiet:
            print(" {} ".format(test_dir))

        try:
            os.mkdir(test_dir)
        except FileExistsError:
            pass

        const_nodes = [value for name, value in consts]

        num_placeholders = len(placeholders)
        # if the test is quantized, create tensor quantization metadata info for
        # each input tensor, based on the different quantized types
        if quantized_inference_dtype:
            is_quantized = True
            # TODO: support INT8 IFM x INT4 weight later
            if quantized_inference_dtype == QuantType.ALL_U8:
                qzero = [128] * num_placeholders
                numpy_dtype = [np.uint8] * num_placeholders
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.ALL_I8:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int8] * num_placeholders
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.ALL_I16:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int16] * num_placeholders
                tflite_inference_dtype = tf.int16
            elif quantized_inference_dtype == QuantType.CONV_U8_U8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [128] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.uint8, np.uint8]
                else:
                    numpy_dtype = [np.uint8, np.uint8, np.int32]
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.CONV_I8_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [0] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.int8, np.int8]
                else:
                    numpy_dtype = [np.int8, np.int8, np.int32]
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.CONV_I16_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                if num_placeholders == 2:
                    qzero = [0, 0]
                    numpy_dtype = [np.int16, np.int8]
                else:
                    qzero = [0, 0, 0]
                    numpy_dtype = [
                        np.int16,
                        np.int8,
                        np.int64,
                    ]  # np.int64 to represent the 40-bit accumulator
                tflite_inference_dtype = tf.int16
            else:
                raise Exception(
                    "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
                )

        else:
            is_quantized = False

        tf_model_filename = None
        tf_result_npy_filename = None
        tf_result_name = None

        tflite_model_filename = None
        tflite_result_npy_filename = None
        tflite_result_name = None

        placeholder_names = []
        placeholder_vals = []
        placeholder_signatures = ()
        placeholder_npy_filenames = []
        placeholder_shapes = []

        for idx, (name, val) in enumerate(placeholders):
            placeholder_names.append(name)
            placeholder_signatures = placeholder_signatures + (
                tf.TensorSpec(shape=val.shape, dtype=val.dtype, name=name),
            )
            placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
            placeholder_shapes.append(val.shape)

        # Get test builder class
        fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
        concrete_function = tf.function(input_signature=placeholder_signatures)(
            fcn_node.eval
        ).get_concrete_function()

        if is_quantized:

            assert dtype is tf.float32, "quantized test must come from float32 graph"

            # 1. Quantize the float placeholder npy to feed the graph
            for idx, (name, val) in enumerate(placeholders):

                # we use np.amin()/np.amax() to determine the dynamic range
                # for the quantized test
                zeropoint = 0
                scale = 1.0
                if numpy_dtype[idx] != np.int64:
                    qmin = np.iinfo(numpy_dtype[idx]).min
                    qmax = np.iinfo(numpy_dtype[idx]).max
                    num_bits = np.iinfo(numpy_dtype[idx]).bits
                # 40 bit is represented as np.int64
                else:
                    num_bits = 40
                    qmin = -(1 << num_bits)
                    qmax = (1 << num_bits) - 1

                min_val = np.amin(val)
                max_val = np.amax(val)

                # for a single-value tensor, we set scale equal to abs(value)
                # and fix zeropoint to 128
                # if val > 0, it'll be represented as 129,
                #    where val = (129 - 128) * val
                # if val < 0, it'll be represented as 127,
                #    where val = (127 - 128) * (-val)
                # if val == 0, it'll be represented as 128, with range [-128.0, 128.0]
                # and let quantized 1 represent the value
                # also adjust the effective min/max accordingly
                if max_val == min_val:
                    if max_val != 0:
                        scale = abs(max_val)
                    else:
                        scale = 1.0
                    min_val = float(qmin - qzero[idx]) * scale
                    max_val = float(qmax - qzero[idx]) * scale
                else:
                    scale = (max_val - min_val) / float(qmax - qmin)
                    zeropoint = int(round((-min_val) / scale)) + qmin

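                # Illustrative example (hypothetical values, not taken from any
                # specific test): an int8 placeholder with observed range
                # [-0.5, 1.5] gives scale = 2.0 / 255 ~= 0.00784 and
                # zeropoint = round(0.5 / 0.00784) + (-128) = -64, so -0.5 and
                # 1.5 quantize to -128 and 127 respectively.
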
                # run through tf.fakequant first to make sure the quantization
                # error is aligned
                fakequant_val = tf.quantization.fake_quant_with_min_max_args(
                    val,
                    min=min_val,
                    max=max_val,
                    num_bits=num_bits,
                    name="gen_quant_npy",
                )

                quant_val = np.round(fakequant_val / scale).astype(np.int32) + zeropoint

                # in a very few unit tests after the TF hash of May 2020, this
                # quantized value for some reason exceeds the [0, 255] range
                saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])

                # save all quantized tensors as np.int32,
                # since the TOSA numpy C++ API only supports int32
                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]),
                    saved_val.astype(np.int32),
                    False,
                )

                placeholder_vals.append(tf.convert_to_tensor(saved_val))

            # 2. Convert the model to a quantized TFLite flatbuffer
            module = tf.Module()
            converter = tf.lite.TFLiteConverter.from_concrete_functions(
                [concrete_function], module
            )
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.experimental_new_converter = True

            # use the MLIR-based post-quantizer
            converter.experimental_new_quantizer = True

            flag = (
                tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8  # noqa: E501
            )
            if tflite_inference_dtype == tf.int16:
                converter.target_spec.supported_ops = [flag]

            def input_stats():
                for i in range(0, args.num_samples):
                    a = [
                        TGen.getRand(shape, tf.float32, rng)
                        for shape in placeholder_shapes
                    ]
                    yield a

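            # The converter pulls float samples from this generator to calibrate
            # tensor ranges during post-training quantization.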
            converter.representative_dataset = input_stats
            converter.inference_input_type = tflite_inference_dtype
            converter.inference_output_type = tflite_inference_dtype

            tflite_model = converter.convert()

            tflite_model_filename = "model.tflite"

            # Write out the converted model to disk
            with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                f.write(tflite_model)

        else:  # is_quantized is False

            # 1. Save out the numpy arrays directly
            for idx, (name, val) in enumerate(placeholders):
                placeholder_vals.append(tf.convert_to_tensor(val))
                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
                )

            # 2.a Save out .pb if the framework list includes tensorflow
            if "tf" not in excluded_framework_list:
                # Write out the graph as protobuf to disk
                tf_model_filename = "model.pb"
                tf.io.write_graph(
                    concrete_function.graph, test_dir, tf_model_filename, True
                )

            # 2.b Save out .tflite if the framework list includes tflite
            if "tflite" not in excluded_framework_list:
                # Convert the model to a TFLite flatbuffer
                module = tf.Module()
                converter = tf.lite.TFLiteConverter.from_concrete_functions(
                    [concrete_function], module
                )

                converter.experimental_new_converter = True

                # Even if it's a non-quantized int32 test, this needs to be set
                # to tf.float32
                converter.inference_input_type = tf.float32
                converter.inference_output_type = tf.float32
                tflite_model = converter.convert()

                # Write out the converted model to disk
                tflite_model_filename = "model.tflite"
                with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                    f.write(tflite_model)

        # Get the TF reference result if a .pb is specified
        if tf_model_filename:
            tf_result_npy_filename = "tf_result.npy"
            tf_result = concrete_function(*placeholder_vals)
            np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)

            tf_result_name = result_name

        # Get the TFLite inference result if a .tflite is specified
        if tflite_model_filename:
            tflite_result_npy_filename = "tflite_result.npy"

            ops_with_optimized_only_kernel = ["elu", "ceil", "gather"]

            if args.tflite_kernel_mode == "optimized" or (
                op_name in ops_with_optimized_only_kernel
            ):
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename)
                )
            elif args.tflite_kernel_mode == "reference":
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename),
                    experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
                )
            else:
                assert 0, "unknown tflite interpreter mode {}".format(
                    args.tflite_kernel_mode
                )
            interpreter.allocate_tensors()

            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()

            assert len(input_details) == len(
                placeholder_vals
            ), "number of placeholders mismatch"

            for idx, val in enumerate(placeholder_vals):
                interpreter.set_tensor(input_details[idx]["index"], val.numpy())

            interpreter.invoke()
            tflite_result = interpreter.get_tensor(output_details[0]["index"])

            np.save(
                os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
            )

            # The result tensor name changes after converting to a TFLite
            # flatbuffer, so overwrite the information from the TFLite model
            # directly.  Assume a single result tensor for now.
            tflite_result_name = output_details[0]["name"]

        # Write out the test descriptor
        write_test_json(
            filename=os.path.join(test_dir, "test.json"),
            tf_model_filename=tf_model_filename,
            tf_result_npy_filename=tf_result_npy_filename,
            tf_result_name=tf_result_name,
            tflite_model_filename=tflite_model_filename,
            tflite_result_npy_filename=tflite_result_npy_filename,
            tflite_result_name=tflite_result_name,
            ifm_name=placeholder_names,
            ifm_file=placeholder_npy_filenames,
            ifm_shape=placeholder_shapes,
            framework_exclusions=excluded_framework_list,
            quantized=is_quantized,
        )
    except Exception as e:
        msg = "Error running task: {}".format(e)
        print(msg)
        print(
            "".join(
                traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
            )
        )
        return False
    return True


def build_const_net(
    args,
    curr_shape,
    op_name,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
    rng,
    filter,
    unit_test_args,
):

    if quantized_inference_dtype:
        quant_dtype = get_tf_dtype(quantized_inference_dtype)
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
    else:
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
    test_dir = os.path.join(args.output_dir, test_dir)

    # If the operator has an additional function to generate arguments, call it
    # here and iterate through the argument list that it generates
    op = TF_OP_LIST[op_name]
    op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

    try:
        rank_lo, rank_hi = op["rank"]
    except KeyError:
        # Set the testing rank to (1, 4) by default.
        rank_lo = 1
        rank_hi = 4

    if len(curr_shape) not in range(rank_lo, rank_hi + 1):
        return

    addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
    for desc, addl_args in addl_args_tuple:
        # Only filter on the full test_name, not the output directory
        _, test_name = os.path.split(test_dir + desc)
        if not filter or filter.search(test_name):
            unit_test_args.append(
                [
                    op_name,
                    args,
                    test_dir + desc,
                    curr_shape,
                    addl_args,
                    dtype,
                    excluded_framework_list,
                    quantized_inference_dtype,
                    result_name,
                    seed,
                ]
            )


# Python's built-in hash() is not reproducible across runs, so create a simple
# deterministic hash for our purposes
def op_name_hash(op_name):
    result = 0xDEADBEEF
    for ch in op_name:
        if result & 1:
            result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
        else:
            result = (ord(ch) << 24) ^ (result >> 1)

    return result


def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):

    if not args.quiet:
        print("Generating tests for {} ".format(op_name))

    op = TF_OP_LIST[op_name]

    # Seed the RNG so that we get the same random tests for each test each time
    # If the number of tests for a given generation function changes, the tests
    # for that operator may also change accordingly, but this will at least keep
    # down churn across operators.

    bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
        np.int32
    ).max
    rng = np.random.default_rng(bounded_hash_val)

    # this is a dictionary with 'tf' and 'tflite' as keys
    # and the values being the data types we want to test under each framework

    if isinstance(op["types"], dict):
        try:
            tf_dtypes = op["types"]["tf"]
        except KeyError:
            tf_dtypes = []
        try:
            tflite_dtypes = op["types"]["tflite"]
        except KeyError:
            tflite_dtypes = []
    elif isinstance(op["types"], list):
        tf_dtypes = op["types"]
        tflite_dtypes = op["types"]

    tf_nonquantized_dtypes = tf_dtypes  # tf doesn't support quantized data types
    tflite_quantized_dtypes = []
    tflite_nonquantized_dtypes = []
    for dtype in tflite_dtypes:
        if isinstance(dtype, QuantType):
            tflite_quantized_dtypes.append(dtype)
        else:
            tflite_nonquantized_dtypes.append(dtype)

    nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
        set(tflite_nonquantized_dtypes)
    )
    nonquantized_dtypes = list(nonquantized_dtypes_set)
    quantized_dtypes = tflite_quantized_dtypes

    # populate the non-quantized unit test arguments
    for dtype in nonquantized_dtypes:

        excluded_framework_set = set(ALL_FRAMEWORKS)
        if dtype in tf_nonquantized_dtypes:
            excluded_framework_set.remove("tf")
        if dtype in tflite_nonquantized_dtypes:
            excluded_framework_set.remove("tflite")
        excluded_framework_list = list(excluded_framework_set)

        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                dtype,
                excluded_framework_list,
                None,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    # populate the quantized unit test arguments
    # must exclude 'tf', and the source dtype must be tf.float32
    for dtype in quantized_dtypes:
        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                tf.float32,
                ["tf"],
                dtype,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    return unit_test_args


def createDynamicOpLists():
    """The templated operators are conv2d-style operators with a number of kernel
    sizes. Since the operator is unchanged, we generate the range of kernel
    sizes here in this loop and remove the original templates from the list.

    This could be expanded to non-conv2d-style operators in the future."""

    # Dynamically create op lists for convolutions with a list of kernel sizes
    KERNELS = [
        [1, 1],
        [3, 3],
        [5, 5],
    ]

    # dim = [D, H, W]
    KERNELS_3D = [
        [1, 1, 1],
        [2, 3, 3],
        [3, 5, 5],
    ]

    TEMPLATE_LIST = [
        "conv2d",
        "conv2d_bias",
        "conv2d_relu",
        "conv2d_relu6",
        "conv2d_relu_n1_to_1",
        "conv2d_tanh",
        "depthwise_conv2d",
        "depthwise_conv2d_bias",
        "transpose_conv2d",
    ]

    TEMPLATE_LIST_CONV3D = [
        "conv3d",
        "conv3d_bias",
    ]

    for t in TEMPLATE_LIST:
        for k in KERNELS:
            testName = "{}_{}x{}".format(t, k[0], k[1])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

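    # For example, the "conv2d" template above expands into conv2d_1x1,
    # conv2d_3x3 and conv2d_5x5 entries, each carrying its kernel size in 'filter'.
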
    # The 2D templates above don't support kernels with more than two
    # dimensions, so the 3D convolutions are expanded separately here.
    for t in TEMPLATE_LIST_CONV3D:
        for k in KERNELS_3D:
            testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # Delete any templates after having created any dynamic ops
    # This is a two-pass operation because it's bad practice to delete
    # keys from dictionaries while iterating
    keyList = []
    for k in TF_OP_LIST:
        try:
            if TF_OP_LIST[k]["template"]:
                keyList.append(k)
                continue
        except KeyError:
            pass

    for k in keyList:
        del TF_OP_LIST[k]


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--seed", dest="random_seed", default=42, type=int, help="Random seed"
    )
    parser.add_argument(
        "--random-shapes",
        dest="random_shapes",
        default=0,
        type=int,
        help=(
            "Use N random shapes of each rank for generating tests, "
            "seeded with random seed"
        ),
    )
    parser.add_argument(
        "-o",
        "--output-dir",
        dest="output_dir",
        default=".",
        type=str,
        help="Test output directory path prefix",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        dest="quiet",
        default=False,
        action="store_true",
        help="Do not print test names",
    )
    parser.add_argument(
        "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
    )
    parser.add_argument(
        "-m",
        "--tflite-kernel-mode",
        dest="tflite_kernel_mode",
        type=str,
        choices=["reference", "optimized"],
        default="reference",
        help="TFLite interpreter kernel mode",
    )
    parser.add_argument(
        "--num-samples",
        dest="num_samples",
        default=200,
        type=int,
        help="Number of input samples for post-training quantization",
    )
    parser.add_argument(
        "--filter",
        dest="filter",
        default="",
        type=str,
        help="Filter test names by this expression",
    )
    args = parser.parse_args()

    # Turn the filter into a re object if present
    filter = None
    if args.filter != "":
        filter = re.compile(args.filter)

    # Autodetect CPU count
    if args.jobs <= 0:
        args.jobs = os.cpu_count()

    # Disable TF info messages
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    try:
        os.makedirs(args.output_dir)
    except FileExistsError:
        pass

    if args.random_shapes:
        gen_rand_shapes(args)

    # Build dynamic ops
    createDynamicOpLists()

    # Generate the test list and arguments to run_unit_test()
    unit_test_args = []

    for op in TF_OP_LIST:
        generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)

    errors = 0
    for t in unit_test_args:
        if not run_unit_test(*t):
            errors = errors + 1

    if not args.quiet:
        print("\nAll tasks done - with {} errors".format(errors))

    return 1 if errors else 0


if __name__ == "__main__":
    exit(main())