#!/usr/bin/env python3
# Copyright (c) 2020-2022, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import traceback

import numpy as np

# Level  | Level for Humans | Level Description
# -------|------------------|------------------------------------
# 0      | DEBUG            | [Default] Print all messages
# 1      | INFO             | Filter out INFO messages
# 2      | WARNING          | Filter out INFO & WARNING messages
# 3      | ERROR            | Filter out all messages
# Filter out TensorFlow debug messages, except errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
import tensorflow as tf  # noqa: E402
from frameworks.write_test_json import write_test_json  # noqa: E402
from frameworks.arg_gen import ArgGen  # noqa: E402
from frameworks.tensor_gen import TGen  # noqa: E402
from frameworks.test_builder import TBuilder  # noqa: E402
from frameworks.test_gen_utils import (  # noqa: E402
    QuantType,
    get_tf_dtype,
    get_shape_str,
)  # noqa: E402
from tensorflow.lite.python.interpreter import OpResolverType  # noqa: E402

# All of the supported frameworks
ALL_FRAMEWORKS = ["tf", "tflite"]

# Lists of different data types
TYPE_F = [tf.float32]
TYPE_I = [tf.int32]
TYPE_FI = [tf.float32, tf.int32]
TYPE_B = [tf.bool]
TYPE_FIB = [tf.float32, tf.int32, tf.bool]
TYPE_H = [tf.float16]
TYPE_FH = [tf.float32, tf.float16]
TYPE_FHI = [tf.float32, tf.float16, tf.int32]
TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]

# The list of operator tests
# Each dictionary entry for an op is a dictionary with the following required members:
#   'operands': tuple (number_of_placeholder_tensors, number_of_constant_tensors)
#   'build_fcn': tuple (Test builder function, Tensor generator function,
#                Argument generator function)
#   'types': list of TensorFlow types that should be tested for this op
#            OR
#            a dictionary of {'framework_name': [type_list]} for cases where only
#            a subset of the types should be tested in each framework. This can
#            also be used to restrict an operator to a particular framework.
#
# And optional members:
#   'template': boolean (indicates that this is a templated op which gets further
#               processing in createDynamicOpLists)
#   'bias': boolean indicating that there is a bias component to be generated
#   'qtypes': list of QuantType quantized types to generate for this op

TF_OP_LIST = {
65 "add": {
66 "operands": (2, 0),
67 "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
68 "types": {
69 "tf": TYPE_FI,
70 "tflite": list(
71 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
72 ),
73 },
74 },
75 "sub": {
76 "operands": (2, 0),
77 "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
78 "types": {
79 "tf": TYPE_FI,
80 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # QuantType.ALL_I16 fails in TFLite conversion
82 },
83 },
84 "mul": {
85 "operands": (2, 0),
86 "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
87 "types": {
88 "tf": TYPE_FI,
89 "tflite": list(
90 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
91 ),
92 },
93 },
94 "exp": {
95 "operands": (1, 0),
96 "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
97 "types": TYPE_F,
98 },
99 "rcp": {
100 "operands": (1, 0),
101 "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
102 "types": TYPE_F,
103 },
104 "relu": {
105 "operands": (1, 0),
106 "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
107 "types": {
108 "tf": TYPE_F,
109 "tflite": list(
110 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
111 ),
112 },
113 },
    "relu1": {
115 "operands": (1, 0),
116 "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
117 "types": {
118 "tf": [],
119 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
120 },
121 },
    "relu6": {
123 "operands": (1, 0),
124 "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
125 "types": {
126 "tf": TYPE_F,
127 "tflite": list(
128 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
129 ),
130 },
131 },
132 "leaky_relu": {
133 "operands": (1, 0),
134 "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
135 "types": {
136 "tf": TYPE_F,
137 "tflite": list(
138 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
139 ),
140 },
141 },
142 "concat": {
143 "operands": (2, 0),
144 "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
145 "types": TYPE_FI,
146 },
147 "bitwise_and": {
148 "operands": (2, 0),
149 "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
150 "types": {"tf": TYPE_I}, # Not supported in TF Lite
151 },
152 "bitwise_or": {
153 "operands": (2, 0),
154 "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
155 "types": {"tf": TYPE_I}, # Not supported in TF Lite
156 },
157 "bitwise_not": {
158 "operands": (1, 0),
159 "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
160 "types": {"tf": TYPE_I}, # Not supported in TF Lite
161 },
162 "bitwise_xor": {
163 "operands": (2, 0),
164 "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
165 "types": {"tf": TYPE_I}, # Not supported in TF Lite
166 },
167 "logical_and": {
168 "operands": (2, 0),
169 "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
170 "types": TYPE_B,
171 },
172 "logical_or": {
173 "operands": (2, 0),
174 "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
175 "types": TYPE_B,
176 },
177 "logical_not": {
178 "operands": (1, 0),
179 "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
180 "types": TYPE_B,
181 },
182 "reduce_any": {
183 "operands": (1, 0),
184 "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
185 "types": TYPE_B,
186 },
187 "reduce_all": {
188 "operands": (1, 0),
189 "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
190 "types": {"tf": TYPE_B},
191 },
192 "reduce_min": {
193 "operands": (1, 0),
194 "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
195 "types": {
196 "tf": TYPE_FI,
197 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
198 },
199 },
200 "reduce_max": {
201 "operands": (1, 0),
202 "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
203 "types": {
204 "tf": TYPE_FI,
205 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
206 },
207 },
208 "reduce_sum": {
209 "operands": (1, 0),
210 "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
211 "types": {
212 "tf": TYPE_F,
213 # v2 converter doesn't recognize quantized reduce_sum
214 # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
215 "tflite": TYPE_F,
216 },
217 },
218 "reduce_mean": {
219 "operands": (1, 0),
220 "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
221 "types": {
222 "tf": TYPE_F,
223 "tflite": list(
224 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
225 ),
226 },
227 },
228 "reduce_product": {
229 "operands": (1, 0),
230 "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
231 "types": TYPE_F,
232 },
233 "min": {
234 "operands": (2, 0),
235 "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
236 "types": TYPE_FI,
237 },
238 "max": {
239 "operands": (2, 0),
240 "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
241 "types": TYPE_FI,
242 },
243 "pow": {
244 "operands": (2, 0),
245 "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
246 # Technically, integer is supported, but only for positive exponents.
247 # Needs a random argument generator.
248 "types": TYPE_F,
249 },
250 "abs": {
251 "operands": (1, 0),
252 "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
253 "types": TYPE_F,
254 },
255 "ceil": {
256 "operands": (1, 0),
257 "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
258 "types": TYPE_F,
259 },
260 "floor": {
261 "operands": (1, 0),
262 "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
263 "types": TYPE_F,
264 },
265 "log": {
266 "operands": (1, 0),
267 "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
268 "types": TYPE_F,
269 },
270 "negate": {
271 "operands": (1, 0),
272 "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
273 "types": TYPE_F,
274 },
275 "rsqrt": {
276 "operands": (1, 0),
277 "build_fcn": (TBuilder.Rsqrt, TGen.tgBasic, ArgGen.agNone),
278 "types": TYPE_F,
279 },
280 "sigmoid": {
281 "operands": (1, 0),
282 "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
283 "types": {
284 "tf": TYPE_F,
285 "tflite": list(
286 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
287 ),
288 },
289 },
290 "tanh": {
291 "operands": (1, 0),
292 "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
293 "types": {
294 "tf": TYPE_F,
295 "tflite": list(
296 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
297 ),
298 },
299 },
300 "square": {
301 "operands": (1, 0),
302 "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
303 "types": TYPE_F,
304 },
305 "squared_difference": {
306 "operands": (2, 0),
307 "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
308 "types": TYPE_F,
309 },
310 "equal": {
311 "operands": (2, 0),
312 "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
313 "types": TYPE_FI,
314 },
315 "greater_equal": {
316 "operands": (2, 0),
317 "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
318 "types": TYPE_FI,
319 },
320 "greater": {
321 "operands": (2, 0),
322 "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
323 "types": TYPE_FI,
324 },
325 "less": {
326 "operands": (2, 0),
327 "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
328 "types": TYPE_FI,
329 },
330 "less_equal": {
331 "operands": (2, 0),
332 "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
333 "types": TYPE_FI,
334 },
335 "conv2d_TEMPLATE": {
336 "operands": (1, 1),
337 "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
338 "types": {
339 "tf": [tf.float32],
340 "tflite": [
341 tf.float32,
342 QuantType.CONV_U8_U8,
343 QuantType.CONV_I8_I8,
344 QuantType.CONV_I16_I8,
345 ],
346 },
347 "template": True,
348 },
349 "conv2d_relu_TEMPLATE": {
350 "operands": (1, 2),
351 "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
352 "types": {
353 "tf": [tf.float32],
354 "tflite": [
355 tf.float32,
356 QuantType.CONV_U8_U8,
357 QuantType.CONV_I8_I8,
358 QuantType.CONV_I16_I8,
359 ],
360 },
361 "template": True,
362 },
363 "conv2d_relu6_TEMPLATE": {
364 "operands": (1, 2),
365 "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
366 "types": {
367 "tf": [tf.float32],
368 "tflite": [
369 tf.float32,
370 QuantType.CONV_U8_U8,
371 QuantType.CONV_I8_I8,
372 QuantType.CONV_I16_I8,
373 ],
374 },
375 "template": True,
376 },
377 "conv2d_relu_n1_to_1_TEMPLATE": {
378 "operands": (1, 2),
379 "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
380 "types": {
381 "tf": [tf.float32],
382 "tflite": [
383 tf.float32,
384 QuantType.CONV_U8_U8,
385 QuantType.CONV_I8_I8,
386 QuantType.CONV_I16_I8,
387 ],
388 },
389 "template": True,
390 },
    # This test is converted as:
    #   tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
    # TODO: any way to generate tfl.conv2d(){fused_activation_function="TANH"}?
394 "conv2d_tanh_TEMPLATE": {
395 "operands": (1, 2),
396 "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
397 "types": {
398 "tf": [tf.float32],
399 "tflite": [
400 tf.float32,
401 QuantType.CONV_U8_U8,
402 QuantType.CONV_I8_I8,
403 QuantType.CONV_I16_I8,
404 ],
405 },
406 "template": True,
407 },
408 "conv2d_bias_TEMPLATE": {
409 "operands": (1, 2),
410 "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
411 "types": {
412 "tf": [tf.float32],
413 "tflite": [
414 tf.float32,
415 QuantType.CONV_U8_U8,
416 QuantType.CONV_I8_I8,
417 QuantType.CONV_I16_I8,
418 ],
419 },
420 "bias": True,
421 "template": True,
422 },
423 "depthwise_conv2d_TEMPLATE": {
424 "operands": (1, 1),
425 "build_fcn": (
426 TBuilder.DepthwiseConv2d,
427 TGen.tgDepthwiseConv2d,
428 ArgGen.agDepthwiseConv2d,
429 ),
430 "types": {
431 "tf": [tf.float32],
432 "tflite": [
433 tf.float32,
434 QuantType.CONV_U8_U8,
435 QuantType.CONV_I8_I8,
436 QuantType.CONV_I16_I8,
437 ],
438 },
439 "template": True,
440 },
441 "depthwise_conv2d_bias_TEMPLATE": {
442 "operands": (1, 2),
443 "build_fcn": (
444 TBuilder.DepthwiseConv2dWithBias,
445 TGen.tgDepthwiseConv2d,
446 ArgGen.agDepthwiseConv2d,
447 ),
448 "types": {
449 "tf": [tf.float32],
450 "tflite": [
451 tf.float32,
452 QuantType.CONV_U8_U8,
453 QuantType.CONV_I8_I8,
454 QuantType.CONV_I16_I8,
455 ],
456 },
457 "bias": True,
458 "template": True,
459 },
460 "transpose_conv2d_TEMPLATE": {
461 "operands": (1, 1),
462 "build_fcn": (
463 TBuilder.TransposeConv2d,
464 TGen.tgTransposeConv2d,
465 ArgGen.agTransposeConv2d,
466 ),
467 "types": {
468 "tf": [tf.float32],
469 "tflite": [
470 tf.float32,
471 QuantType.CONV_U8_U8,
472 QuantType.CONV_I8_I8,
473 QuantType.CONV_I16_I8,
474 ],
475 },
476 "template": True,
477 },
478 "argmax": {
479 "operands": (1, 0),
480 "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
481 "types": {"tf": TYPE_F},
482 },
483 "avg_pool2d": {
484 "operands": (1, 0),
485 "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
486 "types": {
487 "tf": TYPE_F,
488 "tflite": list(
489 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
490 ),
491 },
492 },
493 "max_pool2d": {
494 "operands": (1, 0),
495 "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
496 "types": {
497 "tf": TYPE_F,
498 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # ALL_I16 not supported yet
            # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
            # QI16 is missing from MaxPoolOperandAndResultConstraints.
            # If QI16 is added back, this test can run through.
503 },
504 },
505 "reshape": {
506 "operands": (1, 0),
507 "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
508 "types": TYPE_FI,
509 },
510 "transpose": {
511 "operands": (1, 0),
512 "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
513 "types": TYPE_FI,
514 },
515 "slice": {
516 "operands": (1, 0),
517 "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
518 "types": TYPE_FI,
519 },
520 "strided_slice": {
521 "operands": (1, 0),
522 "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
523 "types": TYPE_FI,
524 },
525 "select": {
526 "operands": (3, 0),
527 "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
528 "types": TYPE_FI,
529 },
530 "addn": {
531 "operands": (4, 0),
532 "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
533 "types": TYPE_FI,
534 },
535 "concatv2": {
536 "operands": (4, 0),
537 "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
538 "types": TYPE_FI,
539 },
540 "stack": {
541 "operands": (4, 0),
542 "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
543 "types": TYPE_FI,
544 },
545 "unstack": {
546 "operands": (1, 0),
547 "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
548 "types": TYPE_F,
549 },
550 "pad": {
551 "operands": (1, 0),
552 "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
553 "types": TYPE_F,
554 },
555 "expand_dims": {
556 "operands": (1, 0),
557 "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
558 "types": TYPE_FI,
559 },
560 "shape": {
561 "operands": (1, 0),
562 "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
563 "types": TYPE_FI,
564 },
565 "rank": {
566 "operands": (1, 0),
567 "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
568 "types": TYPE_FI,
569 },
570 "fill": {
571 "operands": (1, 0),
572 "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
573 "types": TYPE_FI,
574 },
575 "elu": {
576 "operands": (1, 0),
577 "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
578 "types": TYPE_F,
579 },
580 "softmax": {
581 "operands": (1, 0),
582 "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
583 "types": {
584 "tf": TYPE_F,
585 "tflite": list(
586 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
587 ),
588 },
589 },
590 "log_softmax": {
591 "operands": (1, 0),
592 "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
593 "types": TYPE_F,
594 },
595 "matmul": {
596 "operands": (2, 0),
597 "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
598 "types": {
599 "tf": TYPE_F,
600 "tflite": list(
601 TYPE_F
602 + [QuantType.ALL_U8, QuantType.ALL_I8]
                # 16-bit matmul fails to convert
604 ),
605 },
606 },
607 "add_scalar": {
608 "operands": (1, 0),
609 "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
610 "types": TYPE_F,
611 },
612 "add_1d": {
613 "operands": (2, 0),
614 "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
615 "types": TYPE_F,
616 },
617 "split": {
618 "operands": (1, 0),
619 "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
620 "types": TYPE_FI,
621 },
622 "tile": {
623 "operands": (1, 0),
624 "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
625 "types": TYPE_FI,
626 },
627 "reverse": {
628 "operands": (1, 0),
629 "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
630 "types": {"tf": TYPE_FI},
631 },
632 "gather": {
633 "operands": (1, 0),
634 "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
635 "types": TYPE_FI,
636 },
637 "gather_nd": {
638 "operands": (1, 0),
639 "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
640 "types": TYPE_FI,
641 },
642 "scatter_nd": {
643 "operands": (1, 0),
644 "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
645 "types": TYPE_FI,
646 },
647 "space_to_batch": {
648 "operands": (1, 0),
649 "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
650 "types": TYPE_F,
651 },
652 "batch_to_space": {
653 "operands": (1, 0),
654 "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
655 "types": TYPE_F,
656 },
657 "space_to_depth": {
658 "operands": (1, 0),
659 "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
660 "types": TYPE_F,
661 },
662 "depth_to_space": {
663 "operands": (1, 0),
664 "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
665 "types": TYPE_F,
666 },
667 "one_hot": {
668 "operands": (3, 1),
669 "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
670 "types": TYPE_FI,
671 },
672 "fakequant": {
673 "operands": (1, 0),
674 "build_fcn": (
675 TBuilder.Fakequant,
676 TGen.tgBasic,
677 ArgGen.agFakequant,
678 ),
679 "types": {"tf": TYPE_F},
680 },
681 "resize_nearest": {
682 "operands": (1, 0),
683 "build_fcn": (TBuilder.ResizeNearest, TGen.tgPooling, ArgGen.agNone),
684 "types": {
685 "tf": TYPE_F,
686 "tflite": list(
687 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
688 ),
689 },
690 },
691 "resize_bilinear": {
692 "operands": (1, 0),
693 "build_fcn": (TBuilder.ResizeBilinear, TGen.tgPooling, ArgGen.agNone),
694 "types": {
695 "tf": TYPE_F,
696 "tflite": list(
697 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
698 ),
699 },
700 },
    "resize_bilinear_v1_align_corners": {
702 "operands": (1, 0),
703 "build_fcn": (
704 TBuilder.ResizeBilinearV1AlignCorners,
705 TGen.tgPooling,
706 ArgGen.agNone,
707 ),
708 "types": {
709 "tf": TYPE_F,
710 "tflite": list(
711 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
712 ),
713 },
714 },
715 "resize_bilinear_v1_none": {
716 "operands": (1, 0),
717 "build_fcn": (TBuilder.ResizeBilinearV1None, TGen.tgPooling, ArgGen.agNone),
718 "types": {
719 "tf": TYPE_F,
720 "tflite": list(
721 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
722 ),
723 },
724 },
    "left_shift": {
726 "operands": (1, 0),
727 "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
728 "types": {"tf": [tf.int32]},
729 },
730 "right_shift": {
731 "operands": (1, 0),
732 "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
733 "types": {
734 "tf": [
735 tf.int32,
736 ]
737 },
738 },
739}
740
# Shapes to be tested; the default list can be overwritten by gen_rand_shapes()
742shape_list = [
743 (1,),
744 (64,),
745 (14, 19),
746 (13, 21, 3),
747 (1, 4, 4, 4),
748 (1, 8, 4, 17),
749 (1, 4, 8, 19),
750 (1, 32, 32, 8),
751 (1, 7, 7, 9),
752]
753
754
755def gen_rand_shapes(args):
756 """Overwrite the global shape list with a new list of random shapes"""
757 global shape_list
758
759 rng = np.random.default_rng(args.random_seed)
760
761 # Don't let things get too big... cap the maximum volume, but let
762 # an individual dimension be 1..47
763 max_total_volume = 32 * 32 * 4
764
765 shape_list = []
766 # Only iterate over ranks 2, 3, and 4
767 for rank in range(2, 5):
768 for n in range(args.random_shapes):
769 new_shape = rng.integers(1, 48, size=rank)
770
771 # Set the batch dimension on 4D objects to 1
772 if rank == 4:
773 new_shape[0] = 1
774
775 # Limit the total shape volume and throw out any
776 # shapes that wouldn't leave at least size=2 in some non-batch dimension
777 volume = 1
778 skip_shape = False
779 for i in range(rank):
780
781 volume *= new_shape[i]
782
783 # Reduce the shape, while it's larger than the maximum volume
784 while volume > max_total_volume:
785 new_shape[i] = new_shape[i] // 2
786 volume = volume // 2
787
788 # Now an untenable dimension size? Skip this one.
789 if new_shape[i] < 1:
790 skip_shape = True
791
792 if not skip_shape:
793 shape_list.append(tuple(new_shape))
794
795
# Construct, run and save a whole TensorFlow tf.function to a protobuf file,
# or convert it to a .tflite flatbuffer if it is a quantized unit test
798def run_unit_test(
799 op_name,
800 args,
801 test_dir,
802 curr_shape,
803 addl_args,
804 dtype,
805 excluded_framework_list,
806 quantized_inference_dtype,
807 result_name,
808 seed,
809):
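    """Build and run a single unit test.

    Generates the input tensors, traces the op graph produced by the test
    builder, saves the inputs and reference results as .npy files, writes the
    model out as a TensorFlow .pb and/or a TFLite .tflite flatbuffer, and
    describes the whole test in a test.json file inside test_dir.

    Returns True on success (or if the test generates no tensors), False if an
    exception was raised.
    """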
810
811 try:
812 op = TF_OP_LIST[op_name]
813 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
814
815 # Get and seed a random number generator for this test
816 rng = np.random.default_rng(seed)
817
818 # return placeholders=(str: name, np.array: value)
819 # consts=(str: name, np.array: value)
820 placeholders, consts = tensor_gen_fcn(op, curr_shape, dtype, rng)
821
        # if the test doesn't have any placeholders/consts, return early
823 if len(placeholders) == 0 and len(consts) == 0:
824 return True
825
826 if not args.quiet:
827 print(" {} ".format(test_dir))
828
829 try:
830 os.mkdir(test_dir)
831 except FileExistsError:
832 pass
833
834 const_nodes = [value for name, value in consts]
835
836 num_placeholders = len(placeholders)
        # if the test is quantized, create tensor quantization metadata for
        # each input tensor, based on the quantized type
839 if quantized_inference_dtype:
840 is_quantized = True
841 # TODO: support INT8 IFM x INT4 weight later
842 if quantized_inference_dtype == QuantType.ALL_U8:
843 qzero = [128] * num_placeholders
844 numpy_dtype = [np.uint8] * num_placeholders
845 tflite_inference_dtype = tf.uint8
846 elif quantized_inference_dtype == QuantType.ALL_I8:
847 qzero = [0] * num_placeholders
848 numpy_dtype = [np.int8] * num_placeholders
849 tflite_inference_dtype = tf.int8
850 elif quantized_inference_dtype == QuantType.ALL_I16:
851 qzero = [0] * num_placeholders
852 numpy_dtype = [np.int16] * num_placeholders
853 tflite_inference_dtype = tf.int16
854 elif quantized_inference_dtype == QuantType.CONV_U8_U8:
855 assert (
856 num_placeholders == 1
857 ), "Unsupported number of placeholders for Convolution: {}".format(
858 num_placeholders
859 )
860 qzero = [128] * num_placeholders
861 if num_placeholders == 2:
862 numpy_dtype = [np.uint8, np.uint8]
863 else:
864 numpy_dtype = [np.uint8, np.uint8, np.int32]
865 tflite_inference_dtype = tf.uint8
866 elif quantized_inference_dtype == QuantType.CONV_I8_I8:
867 assert (
868 num_placeholders == 1
869 ), "Unsupported number of placeholders for Convolution: {}".format(
870 num_placeholders
871 )
872 qzero = [0] * num_placeholders
873 if num_placeholders == 2:
874 numpy_dtype = [np.int8, np.int8]
875 else:
876 numpy_dtype = [np.int8, np.int8, np.int32]
877 tflite_inference_dtype = tf.int8
878 elif quantized_inference_dtype == QuantType.CONV_I16_I8:
879 assert (
880 num_placeholders == 1
881 ), "Unsupported number of placeholders for Convolution: {}".format(
882 num_placeholders
883 )
884 if num_placeholders == 2:
885 qzero = [0, 0]
886 numpy_dtype = [np.int16, np.int8]
887 else:
888 qzero = [0, 0, 0]
889 numpy_dtype = [
890 np.int16,
891 np.int8,
892 np.int64,
893 ] # np.int64 to represent 40 bits accumulator
894 tflite_inference_dtype = tf.int16
895 else:
896 raise Exception(
897 "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
898 )
899
900 else:
901 is_quantized = False
902
903 tf_model_filename = None
904 tf_result_npy_filename = None
905 tf_result_name = None
906
907 tflite_model_filename = None
908 tflite_result_npy_filename = None
909 tflite_result_name = None
910
911 placeholder_names = []
912 placeholder_vals = []
913 placeholder_signatures = ()
914 placeholder_npy_filenames = []
915 placeholder_shapes = []
916
917 for idx, (name, val) in enumerate(placeholders):
918 placeholder_names.append(name)
919 placeholder_signatures = placeholder_signatures + (
920 tf.TensorSpec(shape=val.shape, dtype=val.dtype, name=name),
921 )
922 placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
923 placeholder_shapes.append(val.shape)
924
        # Construct the test builder node for this op
926 fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
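        # Wrap the builder's eval() in a tf.function with the placeholder
        # signatures and trace it; this ConcreteFunction is what gets saved
        # as a GraphDef or converted to TFLite below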
927 concrete_function = tf.function(input_signature=placeholder_signatures)(
928 fcn_node.eval
929 ).get_concrete_function()
930
931 if is_quantized:
932
933 assert dtype is tf.float32, "quantized test must come from float32 graph"
934
            # 1. Quantize the float placeholder npy values to feed the graph
936 for idx, (name, val) in enumerate(placeholders):
937
                # we use np.amin()/np.amax() to determine the dynamic range
                # for the quantized test
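                # affine quantization: real_value ~= (quantized_value - zeropoint) * scale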
940 zeropoint = 0
941 scale = 1.0
942 if numpy_dtype[idx] != np.int64:
943 qmin = np.iinfo(numpy_dtype[idx]).min
944 qmax = np.iinfo(numpy_dtype[idx]).max
945 num_bits = np.iinfo(numpy_dtype[idx]).bits
946 # 40 bit is represented as np.int64
947 else:
948 num_bits = 40
949 qmin = -(1 << num_bits)
950 qmax = (1 << num_bits) - 1
951
952 min_val = np.amin(val)
953 max_val = np.amax(val)
954
                # for a single-value tensor, we set scale equal to abs(value),
                # and fix the zeropoint to 128
                # if val > 0, it'll be represented as 129,
                # where val = (129 - 128) * val
                # if val < 0, it'll be represented as 127,
                # where val = (127 - 128) * (-val)
                # if val == 0, it'll be represented as 128, with range [-128.0, 128.0]
                # and let quantized 1 represent the value
                # also adjust the effective min/max accordingly
964 if max_val == min_val:
965 if max_val != 0:
966 scale = abs(max_val)
967 else:
968 scale = 1.0
969 min_val = float(qmin - qzero[idx]) * scale
970 max_val = float(qmax - qzero[idx]) * scale
971 else:
972 scale = (max_val - min_val) / float(qmax - qmin)
973 zeropoint = int(round((-min_val) / scale)) + qmin
974
                # run through tf.fakequant first to ensure the quantization error is aligned
976 fakequant_val = tf.quantization.fake_quant_with_min_max_args(
977 val,
978 min=min_val,
979 max=max_val,
980 num_bits=num_bits,
981 name="gen_quant_npy",
982 )
983
984 quant_val = np.round(fakequant_val / scale).astype(np.int32) + zeropoint
985
                # in a few unit tests after the TF hash of May 2020, this quantized
                # value exceeds the [0, 255] range for some reason
988 saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])
989
                # save all quantized tensors as np.int32,
                # since the TOSA numpy C++ API only supports int32
992 np.save(
993 os.path.join(test_dir, placeholder_npy_filenames[idx]),
994 saved_val.astype(np.int32),
995 False,
996 )
997
998 placeholder_vals.append(tf.convert_to_tensor(saved_val))
999
1000 # 2. Convert the model to quantized TFLite flatbuffer
1001 module = tf.Module()
1002 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1003 [concrete_function], module
1004 )
1005 converter.optimizations = [tf.lite.Optimize.DEFAULT]
1006 converter.experimental_new_converter = True
1007
1008 # use MLIR-based post-quantizer
1009 converter.experimental_new_quantizer = True
1010
1011 flag = (
1012 tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 # noqa: E501
1013 )
1014 if tflite_inference_dtype == tf.int16:
1015 converter.target_spec.supported_ops = [flag]
1016
1017 def input_stats():
1018 for i in range(0, args.num_samples):
1019 a = [
1020 TGen.getRand(shape, tf.float32, rng)
1021 for shape in placeholder_shapes
1022 ]
1023 yield a
1024
1025 converter.representative_dataset = input_stats
1026 converter.inference_input_type = tflite_inference_dtype
1027 converter.inference_output_type = tflite_inference_dtype
1028
1029 tflite_model = converter.convert()
1030
1031 tflite_model_filename = "model.tflite"
1032
1033 # Write out converted model to disk
1034 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1035 f.write(tflite_model)
1036
1037 else: # is_quantized is False
1038
            # 1. Save out the numpy arrays directly
1040 for idx, (name, val) in enumerate(placeholders):
1041 placeholder_vals.append(tf.convert_to_tensor(val))
1042 np.save(
1043 os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
1044 )
1045
            # 2.a Save out .pb if the framework list includes tensorflow
1047 if "tf" not in excluded_framework_list:
1048 # Write out graph as protobuf to disk
1049 tf_model_filename = "model.pb"
1050 tf.io.write_graph(
1051 concrete_function.graph, test_dir, tf_model_filename, True
1052 )
1053
            # 2.b Save out .tflite if the framework list includes tflite
1055 if "tflite" not in excluded_framework_list:
1056 # Convert the model to TFLite flatbuffer
1057 module = tf.Module()
1058 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1059 [concrete_function], module
1060 )
1061
1062 converter.experimental_new_converter = True
1063
                # Even for a non-quantized int32 test, this needs to be set to tf.float32
1065 converter.inference_input_type = tf.float32
1066 converter.inference_output_type = tf.float32
1067 tflite_model = converter.convert()
1068
1069 # Write out converted model to disk
1070 tflite_model_filename = "model.tflite"
1071 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1072 f.write(tflite_model)
1073
1074 # Get TF reference result if .pb is specified
1075 if tf_model_filename:
1076 tf_result_npy_filename = "tf_result.npy"
1077 tf_result = concrete_function(*placeholder_vals)
1078 np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)
1079
1080 tf_result_name = result_name
1081
1082 # Get TFLite inference result if .tflite is specified
1083 if tflite_model_filename:
1084 tflite_result_npy_filename = "tflite_result.npy"
1085
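            # These ops only have an optimized TFLite kernel, so use the optimized
            # resolver for them even in "reference" kernel mode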
1086 ops_with_optimized_only_kernel = ["elu", "ceil", "gather"]
1087
1088 if args.tflite_kernel_mode == "optimized" or (
1089 op_name in ops_with_optimized_only_kernel
1090 ):
1091 interpreter = tf.lite.Interpreter(
1092 model_path=os.path.join(test_dir, tflite_model_filename)
1093 )
1094 elif args.tflite_kernel_mode == "reference":
1095 interpreter = tf.lite.Interpreter(
1096 model_path=os.path.join(test_dir, tflite_model_filename),
1097 experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
1098 )
1099 else:
1100 assert 0, "unknown tflite interpreter mode {}".format(
1101 args.tflite_kernel_mode
1102 )
1103 interpreter.allocate_tensors()
1104
1105 input_details = interpreter.get_input_details()
1106 output_details = interpreter.get_output_details()
1107
            assert len(input_details) == len(
                placeholder_vals
            ), "number of placeholders mismatch"
1111
1112 for idx, val in enumerate(placeholder_vals):
1113 interpreter.set_tensor(input_details[idx]["index"], val.numpy())
1114
1115 interpreter.invoke()
1116 tflite_result = interpreter.get_tensor(output_details[0]["index"])
1117
1118 np.save(
1119 os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
1120 )
1121
            # The result tensor name changes after conversion to a TFLite flatbuffer,
            # so overwrite the information from the TFLite model directly.
            # Assume a single result tensor for now.
1125 tflite_result_name = output_details[0]["name"]
1126
1127 # Write out test descriptor
1128 write_test_json(
1129 filename=os.path.join(test_dir, "test.json"),
1130 tf_model_filename=tf_model_filename,
1131 tf_result_npy_filename=tf_result_npy_filename,
1132 tf_result_name=tf_result_name,
1133 tflite_model_filename=tflite_model_filename,
1134 tflite_result_npy_filename=tflite_result_npy_filename,
1135 tflite_result_name=tflite_result_name,
1136 ifm_name=placeholder_names,
1137 ifm_file=placeholder_npy_filenames,
1138 ifm_shape=placeholder_shapes,
1139 framework_exclusions=excluded_framework_list,
1140 quantized=is_quantized,
1141 )
1142 except Exception as e:
1143 msg = "Error running task: {}".format(e)
1144 print(msg)
1145 print(
1146 "".join(
1147 traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
1148 )
1149 )
1150 return False
1151 return True
1152
1153
1154def build_const_net(
1155 args,
1156 curr_shape,
1157 op_name,
1158 dtype,
1159 excluded_framework_list,
1160 quantized_inference_dtype,
1161 result_name,
1162 seed,
1163 rng,
1164 filter,
1165 unit_test_args,
1166):
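    """Work out the test directory name for one (op, shape, dtype) combination
    and, for every argument set produced by the op's argument generator, append
    a matching run_unit_test() argument list to unit_test_args, subject to the
    optional test-name filter."""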
1167
1168 if quantized_inference_dtype:
1169 quant_dtype = get_tf_dtype(quantized_inference_dtype)
1170 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
1171 else:
1172 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
1173 test_dir = os.path.join(args.output_dir, test_dir)
1174
1175 # If the operator has an additional function to generate arguments, call it
1176 # here and iterate through the argument list that it generates
1177 op = TF_OP_LIST[op_name]
1178 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
1179
1180 addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
1181 for desc, addl_args in addl_args_tuple:
        # Only filter on the full test_name, not the output directory
1183 _, test_name = os.path.split(test_dir + desc)
1184 if not filter or filter.search(test_name):
            unit_test_args.append(
1186 [
1187 op_name,
1188 args,
1189 test_dir + desc,
1190 curr_shape,
1191 addl_args,
1192 dtype,
1193 excluded_framework_list,
1194 quantized_inference_dtype,
1195 result_name,
1196 seed,
1197 ]
1198 )
1199
1200
# Python's built-in hash is not reproducible across runs, so create our own hash
1202def op_name_hash(op_name):
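    """Return a deterministic CRC-like shift-and-xor hash of op_name, used
    together with the global random seed to seed the per-operator RNG."""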
1203 result = 0xDEADBEEF
1204 for ch in op_name:
1205 if result & 1:
1206 result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
1207 else:
1208 result = (ord(ch) << 24) ^ (result >> 1)
1209
1210 return result
1211
1212
1213def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):
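    """Expand the TF_OP_LIST entry for op_name into concrete unit test arguments.

    Covers every shape in shape_list for each non-quantized dtype (with the
    appropriate framework exclusions) and for each quantized dtype (TFLite only),
    appending the run_unit_test() argument lists to unit_test_args.
    """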
1214
1215 if not args.quiet:
1216 print(
1217 "Generating tests for {} ".format(
1218 op_name
1219 )
1220 )
1221
1222 op = TF_OP_LIST[op_name]
1223
1224 # Seed the RNG so that we get the same random tests for each test each time
1225 # If the number of tests for a given generation function changes, the tests
1226 # for that operator may also change accordingly, but this will at least keep
1227 # down churn across operators.
1228
1229 bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
1230 np.int32
1231 ).max
1232 rng = np.random.default_rng(bounded_hash_val)
1233
    # op["types"] is either a list of types, or a dictionary with 'tf' and 'tflite'
    # as keys whose values are the data types we want to test under each framework
1236
1237 if isinstance(op["types"], dict):
1238 try:
1239 tf_dtypes = op["types"]["tf"]
1240 except KeyError:
1241 tf_dtypes = []
1242 try:
1243 tflite_dtypes = op["types"]["tflite"]
1244 except KeyError:
1245 tflite_dtypes = []
1246 elif isinstance(op["types"], list):
1247 tf_dtypes = op["types"]
1248 tflite_dtypes = op["types"]
1249
1250 tf_nonquantized_dtypes = tf_dtypes # tf doesn't support quantized data types
1251 tflite_quantized_dtypes = []
1252 tflite_nonquantized_dtypes = []
1253 for dtype in tflite_dtypes:
1254 if isinstance(dtype, QuantType):
1255 tflite_quantized_dtypes.append(dtype)
1256 else:
1257 tflite_nonquantized_dtypes.append(dtype)
1258
1259 nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
1260 set(tflite_nonquantized_dtypes)
1261 )
1262 nonquantized_dtypes = list(nonquantized_dtypes_set)
1263 quantized_dtypes = tflite_quantized_dtypes
1264
1265 # populate non quantized unit test arguments
1266 for dtype in nonquantized_dtypes:
1267
1268 excluded_framework_set = set(ALL_FRAMEWORKS)
1269 if dtype in tf_nonquantized_dtypes:
1270 excluded_framework_set.remove("tf")
1271 if dtype in tflite_nonquantized_dtypes:
1272 excluded_framework_set.remove("tflite")
1273 excluded_framework_list = list(excluded_framework_set)
1274
1275 for curr_shape in shape_list:
1276 build_const_net(
1277 args,
1278 curr_shape,
1279 op_name,
1280 dtype,
1281 excluded_framework_list,
1282 None,
1283 result_name,
1284 bounded_hash_val,
1285 rng,
1286 filter,
1287 unit_test_args,
1288 )
1289
    # populate quantized unit test arguments
    # 'tf' must be excluded, and the source dtype must be tf.float32
1292 for dtype in quantized_dtypes:
1293 for curr_shape in shape_list:
1294 build_const_net(
1295 args,
1296 curr_shape,
1297 op_name,
1298 tf.float32,
1299 ["tf"],
1300 dtype,
1301 result_name,
1302 bounded_hash_val,
1303 rng,
1304 filter,
1305 unit_test_args,
1306 )
1307
1308 return unit_test_args
1309
1310
1311def createDynamicOpLists():
1312 """The templated operators are conv2d-style operators with a number of kernel
1313 sizes. Since the operator is unchanged, we generate the range of kernel
1314 sizes here in this loop and remove the original templates from the list.
1315
1316 This could be expanded to non-conv2d-style operators in the future."""
1317
1318 # Dynamically create op lists for convolutions with a list of kernel sizes
1319 KERNELS = [
1320 [1, 1],
1321 [3, 3],
1322 [5, 5],
1323 ]
1324
1325 TEMPLATE_LIST = [
1326 "conv2d",
1327 "conv2d_bias",
1328 "conv2d_relu",
1329 "conv2d_relu6",
1330 "conv2d_relu_n1_to_1",
1331 "conv2d_tanh",
1332 "depthwise_conv2d",
1333 "depthwise_conv2d_bias",
1334 "transpose_conv2d",
1335 ]
1336
1337 for t in TEMPLATE_LIST:
1338 for k in KERNELS:
1339 testName = "{}_{}x{}".format(t, k[0], k[1])
1340 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1341 TF_OP_LIST[testName]["filter"] = k
1342 TF_OP_LIST[testName]["template"] = False
1343
1344 # Delete any templates after having created any dynamic ops
1345 # This is a two-pass operation because it's bad practice to delete
1346 # keys from dictionaries while iterating
1347 keyList = []
1348 for k in TF_OP_LIST:
1349 try:
1350 if TF_OP_LIST[k]["template"]:
1351 keyList.append(k)
1352 continue
1353 except KeyError:
1354 pass
1355
1356 for k in keyList:
1357 del TF_OP_LIST[k]
1358
1359
1360def main():
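    """Parse the command line, expand TF_OP_LIST and generate all of the tests.

    Example invocation (illustrative):
        <this script> --seed 17 --random-shapes 4 -o ./frameworks_tests --filter conv2d

    Returns 1 if any test failed to generate, 0 otherwise.
    """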
1361 parser = argparse.ArgumentParser()
1362 parser.add_argument(
1363 "--seed", dest="random_seed", default=42, type=int, help="Random seed"
1364 )
1365 parser.add_argument(
1366 "--random-shapes",
1367 dest="random_shapes",
1368 default=0,
1369 type=int,
1370 help=(
1371 "Use N random shapes of each rank for generating tests,"
1372 "seeded with random seed"
1373 ),
1374 )
1375 parser.add_argument(
1376 "-o",
1377 "--output-dir",
1378 dest="output_dir",
1379 default=".",
1380 type=str,
1381 help="Test output directory path prefix",
1382 )
1383 parser.add_argument(
1384 "-q",
1385 "--quiet",
1386 dest="quiet",
1387 default=False,
1388 action="store_true",
1389 help="Do not print test names",
1390 )
1391 parser.add_argument(
1392 "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
1393 )
1394 parser.add_argument(
1395 "-m",
1396 "--tflite-kernel-mode",
1397 dest="tflite_kernel_mode",
1398 type=str,
1399 choices=["reference", "optimized"],
1400 default="reference",
1401 help="TFLite interpreter kernel mode",
1402 )
1403 parser.add_argument(
1404 "--num-samples",
1405 dest="num_samples",
1406 default=200,
1407 type=int,
1408 help="Number of input samples for post-training quantization",
1409 )
1410 parser.add_argument(
1411 "--filter",
1412 dest="filter",
1413 default="",
1414 type=str,
1415 help="Filter test names by this expression",
1416 )
1417 args = parser.parse_args()
1418
1419 # Turn the filter into a re object if present
1420 filter = None
1421 if args.filter != "":
1422 filter = re.compile(args.filter)
1423
1424 # Autodetect CPU count
1425 if args.jobs <= 0:
1426 args.jobs = os.cpu_count()
1427
1428 # Disable TF info messages
1429 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
1430
1431 try:
1432 os.makedirs(args.output_dir)
1433 except FileExistsError:
1434 pass
1435
1436 if args.random_shapes:
1437 gen_rand_shapes(args)
1438
1439 # Build dynamic ops
1440 createDynamicOpLists()
1441
1442 # Generate the test list and arguments to run_unit_test()
1443 unit_test_args = []
1444
1445 for op in TF_OP_LIST:
1446 generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)
1447
1448 errors = 0
1449 for t in unit_test_args:
1450 if not run_unit_test(*t):
1451 errors = errors + 1
1452
1453 if not args.quiet:
1454 print("\nAll tasks done - with {} errors".format(errors))
1455
1456 return 1 if errors else 0
1457
1458
1459if __name__ == "__main__":
1460 exit(main())