#!/usr/bin/env python3
# Copyright (c) 2020-2022, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import traceback

import numpy as np

# Level | Level for Humans | Level Description
# ------|------------------|------------------------------------
#  0    | DEBUG            | [Default] Print all messages
#  1    | INFO             | Filter out INFO messages
#  2    | WARNING          | Filter out INFO & WARNING messages
#  3    | ERROR            | Filter out all messages
# Filter TensorFlow debug messages, except errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
import tensorflow as tf  # noqa: E402
from frameworks.write_test_json import write_test_json  # noqa: E402
from frameworks.arg_gen import ArgGen  # noqa: E402
from frameworks.tensor_gen import TGen  # noqa: E402
from frameworks.test_builder import TBuilder  # noqa: E402
from frameworks.test_gen_utils import (  # noqa: E402
    QuantType,
    get_tf_dtype,
    get_shape_str,
)  # noqa: E402
from tensorflow.lite.python.interpreter import OpResolverType  # noqa: E402

# All of the supported frameworks
ALL_FRAMEWORKS = ["tf", "tflite"]

# Lists of different data types
TYPE_F = [tf.float32]
TYPE_I = [tf.int32]
TYPE_FI = [tf.float32, tf.int32]
TYPE_B = [tf.bool]
TYPE_FIB = [tf.float32, tf.int32, tf.bool]
TYPE_H = [tf.float16]
TYPE_FH = [tf.float32, tf.float16]
TYPE_FHI = [tf.float32, tf.float16, tf.int32]
TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]
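# (Naming key for the lists above: F = float32, H = float16 ("half"),
# I = int32, B = bool.)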

# The list of operator tests
# Each dictionary entry for an op is a dictionary with the following required members:
#   'operands':  tuple (number_of_placeholder_tensors, number_of_constant_tensors)
#   'build_fcn': tuple (Test builder function, Tensor generator function,
#                       Argument generator function)
#   'types':     list of TensorFlow types that should be tested for this op
#                   OR
#                a dictionary of {'framework_name': [type_list]} for cases where only
#                a subset of the types should be tested in each framework.  This can
#                also be used to restrict an operator to a particular framework.
#
# And optional members:
#   'template': boolean (indicates that this is a templated op which gets further
#               processing in createDynamicOpLists)
#   'bias':     boolean indicating that there is a bias component to be generated
#   'qtypes':   list of QuantType quantized types to generate for this op

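# For example, "types": {"tf": TYPE_I} (as used by the bitwise ops below) runs
# an operator only under TensorFlow, while a plain list such as TYPE_FI
# exercises the same types in every framework.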
TF_OP_LIST = {
    "add": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "sub": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # QuantType.ALL_I16 fails in TFLite conversion
        },
    },
    "mul": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "exp": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "rcp": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "relu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "relu1": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": [],
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "relu6": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "leaky_relu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "gelu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            # Need compiler support for tf.Erf.
            # "tf": TYPE_F,
            "tflite": list(
                # Only float32, int8 and uint8 supported currently
                TYPE_F
                + [QuantType.ALL_U8, QuantType.ALL_I8]
            ),
        },
    },
    "concat": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
        "types": TYPE_FI,
    },
    "bitwise_and": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_or": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_not": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_xor": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "logical_and": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "logical_or": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "logical_not": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "reduce_any": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_B,
    },
    "reduce_all": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {"tf": TYPE_B},
    },
    "reduce_min": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "reduce_max": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "reduce_sum": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_F,
            # v2 converter doesn't recognize quantized reduce_sum
            # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
            "tflite": TYPE_F,
        },
    },
    "reduce_mean": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "reduce_product": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_F,
    },
    "min": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "max": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "pow": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
        # Technically, integer is supported, but only for positive exponents.
        # Needs a random argument generator.
        "types": TYPE_F,
    },
    "abs": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "ceil": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "floor": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "log": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "negate": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "rsqrt": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rsqrt, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "sigmoid": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "tanh": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "square": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "squared_difference": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_F,
    },
    "equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "greater_equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "greater": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "less": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "less_equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "conv2d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu6_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu_n1_to_1_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    # This test is converted as:
    # tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
    # TODO: any way to generate tfl.conv2d(){fused_activation_function="TANH"}?
    "conv2d_tanh_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_bias_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "bias": True,
        "template": True,
    },
    "depthwise_conv2d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (
            TBuilder.DepthwiseConv2d,
            TGen.tgDepthwiseConv2d,
            ArgGen.agDepthwiseConv2d,
        ),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "depthwise_conv2d_bias_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (
            TBuilder.DepthwiseConv2dWithBias,
            TGen.tgDepthwiseConv2d,
            ArgGen.agDepthwiseConv2d,
        ),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "bias": True,
        "template": True,
    },
    "transpose_conv2d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (
            TBuilder.TransposeConv2d,
            TGen.tgTransposeConv2d,
            ArgGen.agTransposeConv2d,
        ),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "argmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
        "types": {"tf": TYPE_F},
    },
    "avg_pool2d": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "max_pool2d": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
        "types": {
            "tf": TYPE_F,
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # ALL_I16 not supported yet
            # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
            # QI16 is missing from MaxPoolOperandAndResultConstraints
            # If QI16 is added back, this test can run through.
        },
    },
    "reshape": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
        "types": TYPE_FI,
    },
    "transpose": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
        "types": TYPE_FI,
    },
    "slice": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
        "types": TYPE_FI,
    },
    "strided_slice": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
        "types": TYPE_FI,
    },
    "select": {
        "operands": (3, 0),
        "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "addn": {
        "operands": (4, 0),
        "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "concatv2": {
        "operands": (4, 0),
        "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
        "types": TYPE_FI,
    },
    "stack": {
        "operands": (4, 0),
        "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
        "types": TYPE_FI,
    },
    "unstack": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
        "types": TYPE_F,
    },
    "pad": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
        "types": TYPE_F,
    },
    "expand_dims": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
        "types": TYPE_FI,
    },
    "shape": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "rank": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "fill": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
        "types": TYPE_FI,
    },
    "elu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "softmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "log_softmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "matmul": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F
                + [QuantType.ALL_U8, QuantType.ALL_I8]
                # 16-bit matmul fails to convert
            ),
        },
    },
    "add_scalar": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "add_1d": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "split": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
        "types": TYPE_FI,
    },
    "tile": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
        "types": TYPE_FI,
    },
    "reverse": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
        "types": {"tf": TYPE_FI},
    },
    "gather": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
        "types": TYPE_FI,
    },
    "gather_nd": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
        "types": TYPE_FI,
    },
    "scatter_nd": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
        "types": TYPE_FI,
    },
    "space_to_batch": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
        "types": TYPE_F,
    },
    "batch_to_space": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
        "types": TYPE_F,
    },
    "space_to_depth": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
        "types": TYPE_F,
    },
    "depth_to_space": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
        "types": TYPE_F,
    },
    "one_hot": {
        "operands": (3, 1),
        "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
        "types": TYPE_FI,
    },
    "fakequant": {
        "operands": (1, 0),
        "build_fcn": (
            TBuilder.Fakequant,
            TGen.tgBasic,
            ArgGen.agFakequant,
        ),
        "types": {"tf": TYPE_F},
    },
    "resize_nearest": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ResizeNearest, TGen.tgPooling, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "resize_bilinear": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ResizeBilinear, TGen.tgPooling, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "resize_bilinear_v1_align_corners": {
        "operands": (1, 0),
        "build_fcn": (
            TBuilder.ResizeBilinearV1AlignCorners,
            TGen.tgPooling,
            ArgGen.agNone,
        ),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "resize_bilinear_v1_none": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ResizeBilinearV1None, TGen.tgPooling, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "left_shift": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
        "types": {"tf": [tf.int32]},
    },
    "right_shift": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
        "types": {
            "tf": [
                tf.int32,
            ]
        },
    },
}

# Shapes to be tested; default can be overwritten
shape_list = [
    (1,),
    (64,),
    (14, 19),
    (13, 21, 3),
    (1, 4, 4, 4),
    (1, 8, 4, 17),
    (1, 4, 8, 19),
    (1, 32, 32, 8),
    (1, 7, 7, 9),
]
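# (When --random-shapes N is passed on the command line, gen_rand_shapes()
# below replaces this default list with randomly generated shapes.)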


def gen_rand_shapes(args):
    """Overwrite the global shape list with a new list of random shapes"""
    global shape_list

    rng = np.random.default_rng(args.random_seed)

    # Don't let things get too big... cap the maximum volume, but let
    # an individual dimension be 1..47
    max_total_volume = 32 * 32 * 4

    shape_list = []
    # Only iterate over ranks 2, 3, and 4
    for rank in range(2, 5):
        for n in range(args.random_shapes):
            new_shape = rng.integers(1, 48, size=rank)

            # Set the batch dimension on 4D objects to 1
            if rank == 4:
                new_shape[0] = 1

            # Limit the total shape volume and throw out any
            # shapes that wouldn't leave at least size=2 in some non-batch dimension
            volume = 1
            skip_shape = False
            for i in range(rank):

                volume *= new_shape[i]

                # Reduce the shape, while it's larger than the maximum volume
                while volume > max_total_volume:
                    new_shape[i] = new_shape[i] // 2
                    volume = volume // 2

                # Now an untenable dimension size? Skip this one.
                if new_shape[i] < 1:
                    skip_shape = True

            if not skip_shape:
                shape_list.append(tuple(new_shape))


# Construct, run and save a whole TensorFlow tf.function to a protobuf file,
# or convert it to .tflite if it is a quantized unit test
def run_unit_test(
    op_name,
    args,
    test_dir,
    curr_shape,
    addl_args,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
):

    try:
        op = TF_OP_LIST[op_name]
        op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

        # Get and seed a random number generator for this test
        rng = np.random.default_rng(seed)

        # return placeholders=(str: name, np.array: value)
        #        consts=(str: name, np.array: value)
        placeholders, consts = tensor_gen_fcn(op, curr_shape, dtype, rng)

        # if the test doesn't have any placeholders/consts, terminate
        if len(placeholders) == 0 and len(consts) == 0:
            return True

        if not args.quiet:
            print("   {}".format(test_dir))

        try:
            os.mkdir(test_dir)
        except FileExistsError:
            pass

        const_nodes = [value for name, value in consts]

        num_placeholders = len(placeholders)
        # if the test is quantized, create tensor quantization metadata info for
        # each input tensor, based on the quantized type
        if quantized_inference_dtype:
            is_quantized = True
            # TODO: support INT8 IFM x INT4 weight later
            if quantized_inference_dtype == QuantType.ALL_U8:
                qzero = [128] * num_placeholders
                numpy_dtype = [np.uint8] * num_placeholders
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.ALL_I8:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int8] * num_placeholders
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.ALL_I16:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int16] * num_placeholders
                tflite_inference_dtype = tf.int16
            elif quantized_inference_dtype == QuantType.CONV_U8_U8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [128] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.uint8, np.uint8]
                else:
                    numpy_dtype = [np.uint8, np.uint8, np.int32]
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.CONV_I8_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [0] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.int8, np.int8]
                else:
                    numpy_dtype = [np.int8, np.int8, np.int32]
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.CONV_I16_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                if num_placeholders == 2:
                    qzero = [0, 0]
                    numpy_dtype = [np.int16, np.int8]
                else:
                    qzero = [0, 0, 0]
                    numpy_dtype = [
                        np.int16,
                        np.int8,
                        np.int64,
                    ]  # np.int64 to represent the 40-bit accumulator
                tflite_inference_dtype = tf.int16
            else:
                raise Exception(
                    "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
                )

        else:
            is_quantized = False

        tf_model_filename = None
        tf_result_npy_filename = None
        tf_result_name = None

        tflite_model_filename = None
        tflite_result_npy_filename = None
        tflite_result_name = None

        placeholder_names = []
        placeholder_vals = []
        placeholder_signatures = ()
        placeholder_npy_filenames = []
        placeholder_shapes = []

        for idx, (name, val) in enumerate(placeholders):
            placeholder_names.append(name)
            placeholder_signatures = placeholder_signatures + (
                tf.TensorSpec(shape=val.shape, dtype=val.dtype, name=name),
            )
            placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
            placeholder_shapes.append(val.shape)

        # Get the test builder class
        fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
        concrete_function = tf.function(input_signature=placeholder_signatures)(
            fcn_node.eval
        ).get_concrete_function()

        if is_quantized:

            assert dtype is tf.float32, "quantized test must come from float32 graph"

            # 1. Quantize the float placeholder npy to feed the graph
            for idx, (name, val) in enumerate(placeholders):

                # we use np.amin()/np.amax() to determine the dynamic range
                # for the quantized test
                zeropoint = 0
                scale = 1.0
                if numpy_dtype[idx] != np.int64:
                    qmin = np.iinfo(numpy_dtype[idx]).min
                    qmax = np.iinfo(numpy_dtype[idx]).max
                    num_bits = np.iinfo(numpy_dtype[idx]).bits
                # 40 bit is represented as np.int64
                else:
                    num_bits = 40
                    qmin = -(1 << num_bits)
                    qmax = (1 << num_bits) - 1

                min_val = np.amin(val)
                max_val = np.amax(val)

                # for a single-value tensor, we set scale equal to abs(value)
                # and fix zeropoint to 128
                # if val > 0, it'll be represented as 129,
                #    where val = (129 - 128) * val
                # if val < 0, it'll be represented as 127,
                #    where val = (127 - 128) * (-val)
                # if val == 0, it'll be represented as 128, with range [-128.0, 128.0]
                # and let quantized 1 represent the value
                # also adjust the effective min/max accordingly
                if max_val == min_val:
                    if max_val != 0:
                        scale = abs(max_val)
                    else:
                        scale = 1.0
                    min_val = float(qmin - qzero[idx]) * scale
                    max_val = float(qmax - qzero[idx]) * scale
                else:
                    scale = (max_val - min_val) / float(qmax - qmin)
                    zeropoint = int(round((-min_val) / scale)) + qmin
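                # Illustrative numbers (not from the original source): for int8
                # data (qmin = -128, qmax = 127) with values in [-1.0, 3.0],
                # scale = 4.0 / 255 ~= 0.0157 and
                # zeropoint = round(1.0 / 0.0157) + (-128) = -64.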

                # run through tf.fakequant first to ensure the quantization
                # error is aligned
                fakequant_val = tf.quantization.fake_quant_with_min_max_args(
                    val,
                    min=min_val,
                    max=max_val,
                    num_bits=num_bits,
                    name="gen_quant_npy",
                )

                quant_val = np.round(fakequant_val / scale).astype(np.int32) + zeropoint

                # in a very few unit tests (with TF builds after the May 2020 hash),
                # the quantized value for some reason exceeds the expected range
                # (e.g. [0, 255]), so clip it to [qmin, qmax]
                saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])

                # save all quantized tensors as np.int32,
                # since the TOSA numpy C++ API only supports int32
                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]),
                    saved_val.astype(np.int32),
                    False,
                )

                placeholder_vals.append(tf.convert_to_tensor(saved_val))

            # 2. Convert the model to a quantized TFLite flatbuffer
            module = tf.Module()
            converter = tf.lite.TFLiteConverter.from_concrete_functions(
                [concrete_function], module
            )
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.experimental_new_converter = True

            # use the MLIR-based post-quantizer
            converter.experimental_new_quantizer = True

            flag = (
                tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8  # noqa: E501
            )
            if tflite_inference_dtype == tf.int16:
                converter.target_spec.supported_ops = [flag]

            def input_stats():
                for i in range(0, args.num_samples):
                    a = [
                        TGen.getRand(shape, tf.float32, rng)
                        for shape in placeholder_shapes
                    ]
                    yield a

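            # The TFLite converter iterates over input_stats() during
            # post-training quantization to calibrate activation ranges
            # (standard representative-dataset behaviour).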
            converter.representative_dataset = input_stats
            converter.inference_input_type = tflite_inference_dtype
            converter.inference_output_type = tflite_inference_dtype

            tflite_model = converter.convert()

            tflite_model_filename = "model.tflite"

            # Write out the converted model to disk
            with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                f.write(tflite_model)

        else:  # is_quantized is False

            # 1. Save out the numpy arrays directly
            for idx, (name, val) in enumerate(placeholders):
                placeholder_vals.append(tf.convert_to_tensor(val))
                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
                )

            # 2.a Save out .pb if the framework list includes tensorflow
            if "tf" not in excluded_framework_list:
                # Write out the graph as protobuf to disk
                tf_model_filename = "model.pb"
                tf.io.write_graph(
                    concrete_function.graph, test_dir, tf_model_filename, True
                )

            # 2.b Save out .tflite if the framework list includes tflite
            if "tflite" not in excluded_framework_list:
                # Convert the model to a TFLite flatbuffer
                module = tf.Module()
                converter = tf.lite.TFLiteConverter.from_concrete_functions(
                    [concrete_function], module
                )

                converter.experimental_new_converter = True

                # Even if it's a non-quantized int32 test, this needs to be set
                # to tf.float32
                converter.inference_input_type = tf.float32
                converter.inference_output_type = tf.float32
                tflite_model = converter.convert()

                # Write out the converted model to disk
                tflite_model_filename = "model.tflite"
                with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                    f.write(tflite_model)

        # Get the TF reference result if .pb is specified
        if tf_model_filename:
            tf_result_npy_filename = "tf_result.npy"
            tf_result = concrete_function(*placeholder_vals)
            np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)

            tf_result_name = result_name

        # Get the TFLite inference result if .tflite is specified
        if tflite_model_filename:
            tflite_result_npy_filename = "tflite_result.npy"

            ops_with_optimized_only_kernel = ["elu", "ceil", "gather"]

            if args.tflite_kernel_mode == "optimized" or (
                op_name in ops_with_optimized_only_kernel
            ):
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename)
                )
            elif args.tflite_kernel_mode == "reference":
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename),
                    experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
                )
            else:
                assert 0, "unknown tflite interpreter mode {}".format(
                    args.tflite_kernel_mode
                )
            interpreter.allocate_tensors()

            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()

            assert len(input_details) == len(
                placeholder_vals
            ), "number of placeholders mismatch"

            for idx, val in enumerate(placeholder_vals):
                interpreter.set_tensor(input_details[idx]["index"], val.numpy())

            interpreter.invoke()
            tflite_result = interpreter.get_tensor(output_details[0]["index"])

            np.save(
                os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
            )

            # The result tensor name changes after converting to a TFLite flatbuffer,
            # so overwrite the information from the TFLite model directly.
            # Assume a single result tensor for now
            tflite_result_name = output_details[0]["name"]

        # Write out the test descriptor
        write_test_json(
            filename=os.path.join(test_dir, "test.json"),
            tf_model_filename=tf_model_filename,
            tf_result_npy_filename=tf_result_npy_filename,
            tf_result_name=tf_result_name,
            tflite_model_filename=tflite_model_filename,
            tflite_result_npy_filename=tflite_result_npy_filename,
            tflite_result_name=tflite_result_name,
            ifm_name=placeholder_names,
            ifm_file=placeholder_npy_filenames,
            ifm_shape=placeholder_shapes,
            framework_exclusions=excluded_framework_list,
            quantized=is_quantized,
        )
    except Exception as e:
        msg = "Error running task: {}".format(e)
        print(msg)
        print(
            "".join(
                traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
            )
        )
        return False
    return True


def build_const_net(
    args,
    curr_shape,
    op_name,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
    rng,
    filter,
    unit_test_args,
):

    if quantized_inference_dtype:
        quant_dtype = get_tf_dtype(quantized_inference_dtype)
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
    else:
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
    test_dir = os.path.join(args.output_dir, test_dir)

    # If the operator has an additional function to generate arguments, call it
    # here and iterate through the argument list that it generates
    op = TF_OP_LIST[op_name]
    op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

    addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
    for desc, addl_args in addl_args_tuple:
        # Only filter on the full test_name, not the output directory
        _, test_name = os.path.split(test_dir + desc)
        if not filter or filter.search(test_name):
            unit_test_args.append(
                [
                    op_name,
                    args,
                    test_dir + desc,
                    curr_shape,
                    addl_args,
                    dtype,
                    excluded_framework_list,
                    quantized_inference_dtype,
                    result_name,
                    seed,
                ]
            )


# Python's built-in hash is not reproducible across runs, so create our own
# hash for this purpose
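# For example, op_name_hash("add") gives the same value on every run of this
# script (unlike hash(), which is salted per interpreter process), so the
# per-operator RNG seed derived from it below stays stable across runs.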
def op_name_hash(op_name):
    result = 0xDEADBEEF
    for ch in op_name:
        if result & 1:
            result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
        else:
            result = (ord(ch) << 24) ^ (result >> 1)

    return result


def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):

    if not args.quiet:
        print("Generating tests for {}".format(op_name))

    op = TF_OP_LIST[op_name]

    # Seed the RNG so that we get the same random tests for each operator each time
    # If the number of tests for a given generation function changes, the tests
    # for that operator may also change accordingly, but this will at least keep
    # down churn across operators.

    bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
        np.int32
    ).max
    rng = np.random.default_rng(bounded_hash_val)

    # this is a dictionary with 'tf' and 'tflite' as keys,
    # whose values are the data types we want to test under those frameworks

    if isinstance(op["types"], dict):
        try:
            tf_dtypes = op["types"]["tf"]
        except KeyError:
            tf_dtypes = []
        try:
            tflite_dtypes = op["types"]["tflite"]
        except KeyError:
            tflite_dtypes = []
    elif isinstance(op["types"], list):
        tf_dtypes = op["types"]
        tflite_dtypes = op["types"]

    tf_nonquantized_dtypes = tf_dtypes  # tf doesn't support quantized data types
    tflite_quantized_dtypes = []
    tflite_nonquantized_dtypes = []
    for dtype in tflite_dtypes:
        if isinstance(dtype, QuantType):
            tflite_quantized_dtypes.append(dtype)
        else:
            tflite_nonquantized_dtypes.append(dtype)

    nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
        set(tflite_nonquantized_dtypes)
    )
    nonquantized_dtypes = list(nonquantized_dtypes_set)
    quantized_dtypes = tflite_quantized_dtypes

    # populate the non-quantized unit test arguments
    for dtype in nonquantized_dtypes:

        excluded_framework_set = set(ALL_FRAMEWORKS)
        if dtype in tf_nonquantized_dtypes:
            excluded_framework_set.remove("tf")
        if dtype in tflite_nonquantized_dtypes:
            excluded_framework_set.remove("tflite")
        excluded_framework_list = list(excluded_framework_set)

        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                dtype,
                excluded_framework_list,
                None,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    # populate the quantized unit test arguments
    # must exclude 'tf', with the source dtype being tf.float32
    for dtype in quantized_dtypes:
        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                tf.float32,
                ["tf"],
                dtype,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    return unit_test_args


def createDynamicOpLists():
    """The templated operators are conv2d-style operators with a number of kernel
    sizes.  Since the operator is unchanged, we generate the range of kernel
    sizes here in this loop and remove the original templates from the list.

    This could be expanded to non-conv2d-style operators in the future."""
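
    # For example, the "conv2d" template is expanded into "conv2d_1x1",
    # "conv2d_3x3" and "conv2d_5x5", each carrying its kernel size in a
    # "filter" entry, and the original "conv2d_TEMPLATE" entry is then deleted.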

    # Dynamically create op lists for convolutions with a list of kernel sizes
    KERNELS = [
        [1, 1],
        [3, 3],
        [5, 5],
    ]

    TEMPLATE_LIST = [
        "conv2d",
        "conv2d_bias",
        "conv2d_relu",
        "conv2d_relu6",
        "conv2d_relu_n1_to_1",
        "conv2d_tanh",
        "depthwise_conv2d",
        "depthwise_conv2d_bias",
        "transpose_conv2d",
    ]

    for t in TEMPLATE_LIST:
        for k in KERNELS:
            testName = "{}_{}x{}".format(t, k[0], k[1])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # Delete any templates after having created any dynamic ops
    # This is a two-pass operation because it's bad practice to delete
    # keys from dictionaries while iterating
    keyList = []
    for k in TF_OP_LIST:
        try:
            if TF_OP_LIST[k]["template"]:
                keyList.append(k)
                continue
        except KeyError:
            pass

    for k in keyList:
        del TF_OP_LIST[k]


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--seed", dest="random_seed", default=42, type=int, help="Random seed"
    )
    parser.add_argument(
        "--random-shapes",
        dest="random_shapes",
        default=0,
        type=int,
        help=(
            "Use N random shapes of each rank for generating tests, "
            "seeded with the random seed"
        ),
    )
    parser.add_argument(
        "-o",
        "--output-dir",
        dest="output_dir",
        default=".",
        type=str,
        help="Test output directory path prefix",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        dest="quiet",
        default=False,
        action="store_true",
        help="Do not print test names",
    )
    parser.add_argument(
        "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
    )
    parser.add_argument(
        "-m",
        "--tflite-kernel-mode",
        dest="tflite_kernel_mode",
        type=str,
        choices=["reference", "optimized"],
        default="reference",
        help="TFLite interpreter kernel mode",
    )
    parser.add_argument(
        "--num-samples",
        dest="num_samples",
        default=200,
        type=int,
        help="Number of input samples for post-training quantization",
    )
    parser.add_argument(
        "--filter",
        dest="filter",
        default="",
        type=str,
        help="Filter test names by this expression",
    )
    args = parser.parse_args()

    # Turn the filter into a re object if present
    filter = None
    if args.filter != "":
        filter = re.compile(args.filter)
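        # e.g. --filter conv2d_3x3 keeps only the tests whose names match that
        # regular expression (applied with re.search in build_const_net)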

    # Autodetect CPU count
    if args.jobs <= 0:
        args.jobs = os.cpu_count()

    # Disable TF info messages
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    try:
        os.makedirs(args.output_dir)
    except FileExistsError:
        pass

    if args.random_shapes:
        gen_rand_shapes(args)

    # Build dynamic ops
    createDynamicOpLists()

    # Generate the test list and arguments to run_unit_test()
    unit_test_args = []

    for op in TF_OP_LIST:
        generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)

    errors = 0
    for t in unit_test_args:
        if not run_unit_test(*t):
            errors = errors + 1

    if not args.quiet:
        print("\nAll tasks done - with {} errors".format(errors))

    return 1 if errors else 0


if __name__ == "__main__":
    exit(main())