#!/usr/bin/env python3
# Copyright (c) 2020-2023, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import traceback

import numpy as np

# Level | Level for Humans | Level Description
# ------|------------------|------------------------------------
# 0     | DEBUG            | [Default] Print all messages
# 1     | INFO             | Filter out INFO messages
# 2     | WARNING          | Filter out INFO & WARNING messages
# 3     | ERROR            | Filter out all messages
# Filter out TensorFlow debug messages except errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
import tensorflow as tf  # noqa: E402
from frameworks.write_test_json import write_test_json  # noqa: E402
from frameworks.arg_gen import ArgGen  # noqa: E402
from frameworks.tensor_gen import TGen  # noqa: E402
from frameworks.test_builder import TBuilder  # noqa: E402
from frameworks.test_gen_utils import (  # noqa: E402
    QuantType,
    get_tf_dtype,
    get_shape_str,
)  # noqa: E402
from tensorflow.lite.python.interpreter import OpResolverType  # noqa: E402

# All of the supported frameworks
ALL_FRAMEWORKS = ["tf", "tflite"]

# Lists of different data types
TYPE_F = [tf.float32]
TYPE_I = [tf.int32]
TYPE_FI = [tf.float32, tf.int32]
TYPE_B = [tf.bool]
TYPE_FIB = [tf.float32, tf.int32, tf.bool]
TYPE_H = [tf.float16]
TYPE_FH = [tf.float32, tf.float16]
TYPE_FHI = [tf.float32, tf.float16, tf.int32]
TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]

# The list of operator tests
# Each dictionary entry for an op is a dictionary with the following required members:
#   'operands':  tuple (number_of_placeholder_tensors, number_of_constant_tensors)
#   'build_fcn': tuple (Test builder function, Tensor generator function,
#                       Argument generator function)
#   'types':     list of TensorFlow types that should be tested for this op
#                   OR
#                a dictionary of {'framework_name': [type_list]} for cases where only
#                a subset of the types should be tested in each framework.  This can
#                also be used to restrict an operator to a particular framework.
#
# And optional members:
#   'template': boolean (indicates that this is a templated op which gets further
#               processing in createDynamicOpLists)
#   'bias':     boolean indicating that there is a bias component to be generated
#   'qtypes':   list of QuantType quantized types to generate for this op
#   'rank':     tuple (lowest rank, highest rank). Dimension range of input tensor.
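#
# A minimal illustrative entry (hypothetical op and builder names, shown only to
# make the structure above concrete) would look like:
#
#   "example_op": {
#       "operands": (1, 0),
#       "build_fcn": (TBuilder.ExampleOp, TGen.tgBasic, ArgGen.agNone),
#       "types": {
#           "tf": TYPE_F,
#           "tflite": list(TYPE_F + [QuantType.ALL_I8]),
#       },
#       "rank": (1, 4),
#   },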

TF_OP_LIST = {
66 "add": {
67 "operands": (2, 0),
68 "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
69 "types": {
70 "tf": TYPE_FI,
71 "tflite": list(
72 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
73 ),
74 },
75 },
76 "sub": {
77 "operands": (2, 0),
78 "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
79 "types": {
80 "tf": TYPE_FI,
81 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # QuantType.ALL_I16 fails in TFLite conversion
83 },
84 },
85 "mul": {
86 "operands": (2, 0),
87 "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
88 "types": {
89 "tf": TYPE_FI,
90 "tflite": list(
91 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
92 ),
93 },
94 },
95 "exp": {
96 "operands": (1, 0),
97 "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
98 "types": TYPE_F,
99 },
100 "rcp": {
101 "operands": (1, 0),
102 "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
103 "types": TYPE_F,
104 },
105 "relu": {
106 "operands": (1, 0),
107 "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
108 "types": {
109 "tf": TYPE_F,
110 "tflite": list(
111 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
112 ),
113 },
114 },
    "relu1": {
116 "operands": (1, 0),
117 "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
118 "types": {
119 "tf": [],
120 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
121 },
122 },
    "relu6": {
124 "operands": (1, 0),
125 "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
126 "types": {
127 "tf": TYPE_F,
128 "tflite": list(
129 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
130 ),
131 },
132 },
133 "leaky_relu": {
134 "operands": (1, 0),
135 "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
136 "types": {
137 "tf": TYPE_F,
138 "tflite": list(
139 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
140 ),
141 },
142 },
    "prelu": {
144 "operands": (1, 0),
145 "build_fcn": (TBuilder.Prelu, TGen.tgBasic, ArgGen.agNone),
146 "types": {
147 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
148 },
149 },
    "gelu": {
151 "operands": (1, 0),
152 "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
153 "types": {
154 # Need compiler support for tf.Erf.
155 # "tf": TYPE_F,
156 "tflite": list(
157 # Only float32, int8 and uint8 supported currently
158 TYPE_F
159 + [QuantType.ALL_U8, QuantType.ALL_I8]
160 ),
161 },
162 },
    "concat": {
164 "operands": (2, 0),
165 "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
166 "types": TYPE_FI,
167 },
168 "bitwise_and": {
169 "operands": (2, 0),
170 "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
171 "types": {"tf": TYPE_I}, # Not supported in TF Lite
172 },
173 "bitwise_or": {
174 "operands": (2, 0),
175 "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
176 "types": {"tf": TYPE_I}, # Not supported in TF Lite
177 },
178 "bitwise_not": {
179 "operands": (1, 0),
180 "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
181 "types": {"tf": TYPE_I}, # Not supported in TF Lite
182 },
183 "bitwise_xor": {
184 "operands": (2, 0),
185 "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
186 "types": {"tf": TYPE_I}, # Not supported in TF Lite
187 },
188 "logical_and": {
189 "operands": (2, 0),
190 "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
191 "types": TYPE_B,
192 },
193 "logical_or": {
194 "operands": (2, 0),
195 "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
196 "types": TYPE_B,
197 },
198 "logical_not": {
199 "operands": (1, 0),
200 "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
201 "types": TYPE_B,
202 },
203 "reduce_any": {
204 "operands": (1, 0),
205 "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
206 "types": TYPE_B,
207 },
208 "reduce_all": {
209 "operands": (1, 0),
210 "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
211 "types": {"tf": TYPE_B},
212 },
213 "reduce_min": {
214 "operands": (1, 0),
215 "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
216 "types": {
217 "tf": TYPE_FI,
218 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
219 },
220 },
221 "reduce_max": {
222 "operands": (1, 0),
223 "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
224 "types": {
225 "tf": TYPE_FI,
226 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
227 },
228 },
229 "reduce_sum": {
230 "operands": (1, 0),
231 "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
232 "types": {
233 "tf": TYPE_F,
234 # v2 converter doesn't recognize quantized reduce_sum
235 # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
236 "tflite": TYPE_F,
237 },
238 },
239 "reduce_mean": {
240 "operands": (1, 0),
241 "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
242 "types": {
243 "tf": TYPE_F,
244 "tflite": list(
245 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
246 ),
247 },
248 },
249 "reduce_product": {
250 "operands": (1, 0),
251 "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
252 "types": TYPE_F,
253 },
254 "min": {
255 "operands": (2, 0),
256 "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
257 "types": TYPE_FI,
258 },
259 "max": {
260 "operands": (2, 0),
261 "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
262 "types": TYPE_FI,
263 },
264 "pow": {
265 "operands": (2, 0),
266 "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
267 # Technically, integer is supported, but only for positive exponents.
268 # Needs a random argument generator.
269 "types": TYPE_F,
270 },
271 "abs": {
272 "operands": (1, 0),
273 "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
274 "types": TYPE_F,
275 },
276 "ceil": {
277 "operands": (1, 0),
278 "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
279 "types": TYPE_F,
280 },
281 "floor": {
282 "operands": (1, 0),
283 "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
284 "types": TYPE_F,
285 },
286 "log": {
287 "operands": (1, 0),
288 "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
289 "types": TYPE_F,
290 },
291 "negate": {
292 "operands": (1, 0),
293 "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
294 "types": TYPE_F,
295 },
296 "rsqrt": {
297 "operands": (1, 0),
298 "build_fcn": (TBuilder.Rsqrt, TGen.tgBasic, ArgGen.agNone),
299 "types": TYPE_F,
300 },
    "sign": {
302 "operands": (1, 0),
303 "build_fcn": (TBuilder.Sign, TGen.tgBasic, ArgGen.agNone),
304 "types": {
305 "tf": TYPE_F,
306 },
307 },
    "sigmoid": {
309 "operands": (1, 0),
310 "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
311 "types": {
312 "tf": TYPE_F,
313 "tflite": list(
314 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
315 ),
316 },
317 },
318 "tanh": {
319 "operands": (1, 0),
320 "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
321 "types": {
322 "tf": TYPE_F,
323 "tflite": list(
324 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
325 ),
326 },
327 },
    "sin": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sin, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "cos": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Cos, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "atan2": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Atan2, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tflite": TYPE_F,
        },
    },
    "square": {
346 "operands": (1, 0),
347 "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
348 "types": TYPE_F,
349 },
350 "squared_difference": {
351 "operands": (2, 0),
352 "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
353 "types": TYPE_F,
354 },
355 "equal": {
356 "operands": (2, 0),
357 "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
358 "types": TYPE_FI,
359 },
360 "greater_equal": {
361 "operands": (2, 0),
362 "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
363 "types": TYPE_FI,
364 },
365 "greater": {
366 "operands": (2, 0),
367 "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
368 "types": TYPE_FI,
369 },
370 "less": {
371 "operands": (2, 0),
372 "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
373 "types": TYPE_FI,
374 },
375 "less_equal": {
376 "operands": (2, 0),
377 "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
378 "types": TYPE_FI,
379 },
380 "conv2d_TEMPLATE": {
381 "operands": (1, 1),
382 "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
383 "types": {
384 "tf": [tf.float32],
385 "tflite": [
386 tf.float32,
387 QuantType.CONV_U8_U8,
388 QuantType.CONV_I8_I8,
389 QuantType.CONV_I16_I8,
390 ],
391 },
392 "template": True,
393 },
394 "conv2d_relu_TEMPLATE": {
395 "operands": (1, 2),
396 "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
397 "types": {
398 "tf": [tf.float32],
399 "tflite": [
400 tf.float32,
401 QuantType.CONV_U8_U8,
402 QuantType.CONV_I8_I8,
403 QuantType.CONV_I16_I8,
404 ],
405 },
406 "template": True,
407 },
408 "conv2d_relu6_TEMPLATE": {
409 "operands": (1, 2),
410 "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
411 "types": {
412 "tf": [tf.float32],
413 "tflite": [
414 tf.float32,
415 QuantType.CONV_U8_U8,
416 QuantType.CONV_I8_I8,
417 QuantType.CONV_I16_I8,
418 ],
419 },
420 "template": True,
421 },
422 "conv2d_relu_n1_to_1_TEMPLATE": {
423 "operands": (1, 2),
424 "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
425 "types": {
426 "tf": [tf.float32],
427 "tflite": [
428 tf.float32,
429 QuantType.CONV_U8_U8,
430 QuantType.CONV_I8_I8,
431 QuantType.CONV_I16_I8,
432 ],
433 },
434 "template": True,
435 },
    # This test is converted as:
    #   tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
    # TODO: any way to generate tfl.conv2d(){fused_activation_function="TANH"}?
439 "conv2d_tanh_TEMPLATE": {
440 "operands": (1, 2),
441 "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
442 "types": {
443 "tf": [tf.float32],
444 "tflite": [
445 tf.float32,
446 QuantType.CONV_U8_U8,
447 QuantType.CONV_I8_I8,
448 QuantType.CONV_I16_I8,
449 ],
450 },
451 "template": True,
452 },
453 "conv2d_bias_TEMPLATE": {
454 "operands": (1, 2),
455 "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
456 "types": {
457 "tf": [tf.float32],
458 "tflite": [
459 tf.float32,
460 QuantType.CONV_U8_U8,
461 QuantType.CONV_I8_I8,
462 QuantType.CONV_I16_I8,
463 ],
464 },
465 "bias": True,
466 "template": True,
467 },
    "conv3d_TEMPLATE": {
469 "operands": (1, 1),
470 "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
471 "types": {
472 "tf": [tf.float32],
473 "tflite": [
474 tf.float32,
475 QuantType.CONV_U8_U8,
476 QuantType.CONV_I8_I8,
477 # Quantization to 16x8-bit not yet supported by tflite.
478 ],
479 },
480 "template": True,
481 "rank": (1, 5),
482 },
483 "conv3d_bias_TEMPLATE": {
484 "operands": (1, 2),
485 "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
486 "types": {
487 "tf": [tf.float32],
488 "tflite": [
489 tf.float32,
490 QuantType.CONV_U8_U8,
491 QuantType.CONV_I8_I8,
492 # Quantization to 16x8-bit not yet supported by tflite.
493 ],
494 },
495 "bias": True,
496 "template": True,
497 "rank": (1, 5),
498 },
    "depthwise_conv2d_TEMPLATE": {
500 "operands": (1, 1),
501 "build_fcn": (
502 TBuilder.DepthwiseConv2d,
503 TGen.tgDepthwiseConv2d,
504 ArgGen.agDepthwiseConv2d,
505 ),
506 "types": {
507 "tf": [tf.float32],
508 "tflite": [
509 tf.float32,
510 QuantType.CONV_U8_U8,
511 QuantType.CONV_I8_I8,
512 QuantType.CONV_I16_I8,
513 ],
514 },
515 "template": True,
516 },
517 "depthwise_conv2d_bias_TEMPLATE": {
518 "operands": (1, 2),
519 "build_fcn": (
520 TBuilder.DepthwiseConv2dWithBias,
521 TGen.tgDepthwiseConv2d,
522 ArgGen.agDepthwiseConv2d,
523 ),
524 "types": {
525 "tf": [tf.float32],
526 "tflite": [
527 tf.float32,
528 QuantType.CONV_U8_U8,
529 QuantType.CONV_I8_I8,
530 QuantType.CONV_I16_I8,
531 ],
532 },
533 "bias": True,
534 "template": True,
535 },
536 "transpose_conv2d_TEMPLATE": {
537 "operands": (1, 1),
538 "build_fcn": (
539 TBuilder.TransposeConv2d,
540 TGen.tgTransposeConv2d,
541 ArgGen.agTransposeConv2d,
542 ),
543 "types": {
544 "tf": [tf.float32],
545 "tflite": [
546 tf.float32,
547 QuantType.CONV_U8_U8,
548 QuantType.CONV_I8_I8,
549 QuantType.CONV_I16_I8,
550 ],
551 },
552 "template": True,
553 },
554 "argmax": {
555 "operands": (1, 0),
556 "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
557 "types": {"tf": TYPE_F},
558 },
559 "avg_pool2d": {
560 "operands": (1, 0),
561 "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
562 "types": {
563 "tf": TYPE_F,
564 "tflite": list(
565 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
566 ),
567 },
568 },
569 "max_pool2d": {
570 "operands": (1, 0),
571 "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
572 "types": {
573 "tf": TYPE_F,
574 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # ALL_I16 not supported yet
            # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
            # QI16 is missing from MaxPoolOperandAndResultConstraints
            # If QI16 is added back, this test can run through.
579 },
580 },
581 "reshape": {
582 "operands": (1, 0),
583 "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
584 "types": TYPE_FI,
585 },
586 "transpose": {
587 "operands": (1, 0),
588 "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
589 "types": TYPE_FI,
590 },
591 "slice": {
592 "operands": (1, 0),
593 "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
594 "types": TYPE_FI,
595 },
596 "strided_slice": {
597 "operands": (1, 0),
598 "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
599 "types": TYPE_FI,
600 },
601 "select": {
602 "operands": (3, 0),
603 "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
604 "types": TYPE_FI,
605 },
606 "addn": {
607 "operands": (4, 0),
608 "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
609 "types": TYPE_FI,
610 },
611 "concatv2": {
612 "operands": (4, 0),
613 "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
614 "types": TYPE_FI,
615 },
616 "stack": {
617 "operands": (4, 0),
618 "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
619 "types": TYPE_FI,
620 },
621 "unstack": {
622 "operands": (1, 0),
623 "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
624 "types": TYPE_F,
625 },
    "mirrorpad": {
627 "operands": (1, 0),
628 "build_fcn": (TBuilder.MirrorPad, TGen.tgBasic, ArgGen.agMirrorPad),
629 "types": TYPE_FI,
630 },
    "pad": {
632 "operands": (1, 0),
633 "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
634 "types": TYPE_F,
635 },
636 "expand_dims": {
637 "operands": (1, 0),
638 "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
639 "types": TYPE_FI,
640 },
641 "shape": {
642 "operands": (1, 0),
643 "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
644 "types": TYPE_FI,
645 },
646 "rank": {
647 "operands": (1, 0),
648 "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
649 "types": TYPE_FI,
650 },
651 "fill": {
652 "operands": (1, 0),
653 "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
654 "types": TYPE_FI,
655 },
656 "elu": {
657 "operands": (1, 0),
658 "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
659 "types": TYPE_F,
660 },
661 "softmax": {
662 "operands": (1, 0),
663 "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
664 "types": {
665 "tf": TYPE_F,
666 "tflite": list(
667 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
668 ),
669 },
670 },
671 "log_softmax": {
672 "operands": (1, 0),
673 "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
674 "types": TYPE_F,
675 },
676 "matmul": {
677 "operands": (2, 0),
678 "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
679 "types": {
680 "tf": TYPE_F,
681 "tflite": list(
682 TYPE_F
683 + [QuantType.ALL_U8, QuantType.ALL_I8]
                # 16-bit matmul fails to convert
685 ),
686 },
687 },
688 "add_scalar": {
689 "operands": (1, 0),
690 "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
691 "types": TYPE_F,
692 },
693 "add_1d": {
694 "operands": (2, 0),
695 "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
696 "types": TYPE_F,
697 },
698 "split": {
699 "operands": (1, 0),
700 "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
701 "types": TYPE_FI,
702 },
703 "tile": {
704 "operands": (1, 0),
705 "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
706 "types": TYPE_FI,
707 },
708 "reverse": {
709 "operands": (1, 0),
710 "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
711 "types": {"tf": TYPE_FI},
712 },
713 "gather": {
714 "operands": (1, 0),
715 "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
716 "types": TYPE_FI,
717 },
718 "gather_nd": {
719 "operands": (1, 0),
720 "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
721 "types": TYPE_FI,
722 },
723 "scatter_nd": {
724 "operands": (1, 0),
725 "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
726 "types": TYPE_FI,
727 },
728 "space_to_batch": {
729 "operands": (1, 0),
730 "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
731 "types": TYPE_F,
732 },
733 "batch_to_space": {
734 "operands": (1, 0),
735 "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
736 "types": TYPE_F,
737 },
738 "space_to_depth": {
739 "operands": (1, 0),
740 "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
741 "types": TYPE_F,
742 },
743 "depth_to_space": {
744 "operands": (1, 0),
745 "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
746 "types": TYPE_F,
747 },
748 "one_hot": {
749 "operands": (3, 1),
750 "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
751 "types": TYPE_FI,
752 },
753 "fakequant": {
754 "operands": (1, 0),
755 "build_fcn": (
756 TBuilder.Fakequant,
757 TGen.tgBasic,
758 ArgGen.agFakequant,
759 ),
760 "types": {"tf": TYPE_F},
761 },
762 "resize_nearest": {
763 "operands": (1, 0),
764 "build_fcn": (TBuilder.ResizeNearest, TGen.tgPooling, ArgGen.agNone),
765 "types": {
766 "tf": TYPE_F,
767 "tflite": list(
768 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
769 ),
770 },
771 },
772 "resize_bilinear": {
773 "operands": (1, 0),
774 "build_fcn": (TBuilder.ResizeBilinear, TGen.tgPooling, ArgGen.agNone),
775 "types": {
776 "tf": TYPE_F,
777 "tflite": list(
778 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
779 ),
780 },
781 },
    "resize_bilinear_v1_align_corners": {
783 "operands": (1, 0),
784 "build_fcn": (
785 TBuilder.ResizeBilinearV1AlignCorners,
786 TGen.tgPooling,
787 ArgGen.agNone,
788 ),
789 "types": {
790 "tf": TYPE_F,
791 "tflite": list(
792 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
793 ),
794 },
795 },
796 "resize_bilinear_v1_none": {
797 "operands": (1, 0),
798 "build_fcn": (TBuilder.ResizeBilinearV1None, TGen.tgPooling, ArgGen.agNone),
799 "types": {
800 "tf": TYPE_F,
801 "tflite": list(
802 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
803 ),
804 },
805 },
    "left_shift": {
807 "operands": (1, 0),
808 "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
809 "types": {"tf": [tf.int32]},
810 },
811 "right_shift": {
812 "operands": (1, 0),
813 "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
814 "types": {
815 "tf": [
816 tf.int32,
817 ]
818 },
819 },
    "while": {
821 "operands": (1, 0),
822 "build_fcn": (TBuilder.While, TGen.tgBasic, ArgGen.agNone),
823 "types": {
824 "tflite": list(TYPE_F),
825 },
826 },
827 "lstm": {
828 "operands": (1, 0),
829 "build_fcn": (TBuilder.LSTM, TGen.tgRecurrent, ArgGen.agNone),
830 "types": {
831 "tflite": [
832 tf.float32,
833 # tf.int32
834 ]
835 },
836 },
837 "gru": {
838 "operands": (1, 0),
839 "build_fcn": (TBuilder.GRU, TGen.tgRecurrent, ArgGen.agNone),
840 "types": {
841 "tflite": [
842 tf.float32,
843 # tf.int32
844 ]
845 },
846 },
847 "rnn": {
848 "operands": (1, 0),
849 "build_fcn": (TBuilder.RNN, TGen.tgRecurrent, ArgGen.agNone),
850 "types": {
851 "tflite": [
852 tf.float32,
853 ]
854 },
855 },
    "rfft2d": {
857 "operands": (1, 0),
858 "build_fcn": (TBuilder.RFFT2d, TGen.tgRFFT2d, ArgGen.agRFFT2d),
859 "types": {
860 "tflite": TYPE_F,
861 },
862 },
}
864
865# Shapes to be tested; default can be overwritten
866shape_list = [
867 (1,),
868 (64,),
869 (14, 19),
870 (13, 21, 3),
    (1, 8, 16),
    (1, 4, 4, 4),
873 (1, 8, 4, 17),
874 (1, 4, 8, 19),
875 (1, 32, 32, 8),
876 (1, 7, 7, 9),
    (2, 2, 7, 7, 2),
878 (1, 4, 8, 21, 17),
879 (3, 32, 16, 16, 5),
]
881
882
883def gen_rand_shapes(args):
884 """Overwrite the global shape list with a new list of random shapes"""
885 global shape_list
886
887 rng = np.random.default_rng(args.random_seed)
888
889 # Don't let things get too big... cap the maximum volume, but let
890 # an individual dimension be 1..47
891 max_total_volume = 32 * 32 * 4
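    # Worked example (illustrative): a rank-3 candidate shape such as (40, 40, 10)
    # reaches a running volume of 40 * 40 * 10 = 16000 > 4096 at its last dimension,
    # so that dimension is halved (10 -> 5 -> 2) until the tracked volume drops
    # under the cap, leaving a final shape of (40, 40, 2).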
892
893 shape_list = []
    # Only iterate over ranks 2, 3, 4, and 5
    for rank in range(2, 6):
        for n in range(args.random_shapes):
897 new_shape = rng.integers(1, 48, size=rank)
898
            # Set the batch dimension on 4D or 5D objects to 1
            if rank == 4 or rank == 5:
                new_shape[0] = 1
902
903 # Limit the total shape volume and throw out any
904 # shapes that wouldn't leave at least size=2 in some non-batch dimension
905 volume = 1
906 skip_shape = False
907 for i in range(rank):
908
909 volume *= new_shape[i]
910
911 # Reduce the shape, while it's larger than the maximum volume
912 while volume > max_total_volume:
913 new_shape[i] = new_shape[i] // 2
914 volume = volume // 2
915
916 # Now an untenable dimension size? Skip this one.
917 if new_shape[i] < 1:
918 skip_shape = True
919
920 if not skip_shape:
921 shape_list.append(tuple(new_shape))
922
923
# Construct, run, and save a whole TensorFlow tf.function to a protobuf file,
# or convert it to .tflite if it is a quantized unit test
926def run_unit_test(
927 op_name,
928 args,
929 test_dir,
930 curr_shape,
931 addl_args,
932 dtype,
933 excluded_framework_list,
934 quantized_inference_dtype,
935 result_name,
936 seed,
937):
938
939 try:
940 op = TF_OP_LIST[op_name]
941 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
942
943 # Get and seed a random number generator for this test
944 rng = np.random.default_rng(seed)
945
946 # return placeholders=(str: name, np.array: value)
947 # consts=(str: name, np.array: value)
948 placeholders, consts = tensor_gen_fcn(op, curr_shape, dtype, rng)
949
        # if the test doesn't have any placeholders/consts, terminate
951 if len(placeholders) == 0 and len(consts) == 0:
952 return True
953
954 if not args.quiet:
955 print(" {} ".format(test_dir))
956
957 try:
958 os.mkdir(test_dir)
959 except FileExistsError:
960 pass
961
962 const_nodes = [value for name, value in consts]
963
964 num_placeholders = len(placeholders)
        # if the test is quantized, create tensor quantization metadata info for
        # each input tensor, based on the quantized type
967 if quantized_inference_dtype:
968 is_quantized = True
969 # TODO: support INT8 IFM x INT4 weight later
970 if quantized_inference_dtype == QuantType.ALL_U8:
971 qzero = [128] * num_placeholders
972 numpy_dtype = [np.uint8] * num_placeholders
973 tflite_inference_dtype = tf.uint8
974 elif quantized_inference_dtype == QuantType.ALL_I8:
975 qzero = [0] * num_placeholders
976 numpy_dtype = [np.int8] * num_placeholders
977 tflite_inference_dtype = tf.int8
978 elif quantized_inference_dtype == QuantType.ALL_I16:
979 qzero = [0] * num_placeholders
980 numpy_dtype = [np.int16] * num_placeholders
981 tflite_inference_dtype = tf.int16
982 elif quantized_inference_dtype == QuantType.CONV_U8_U8:
983 assert (
984 num_placeholders == 1
985 ), "Unsupported number of placeholders for Convolution: {}".format(
986 num_placeholders
987 )
988 qzero = [128] * num_placeholders
989 if num_placeholders == 2:
990 numpy_dtype = [np.uint8, np.uint8]
991 else:
992 numpy_dtype = [np.uint8, np.uint8, np.int32]
993 tflite_inference_dtype = tf.uint8
994 elif quantized_inference_dtype == QuantType.CONV_I8_I8:
995 assert (
996 num_placeholders == 1
997 ), "Unsupported number of placeholders for Convolution: {}".format(
998 num_placeholders
999 )
1000 qzero = [0] * num_placeholders
1001 if num_placeholders == 2:
1002 numpy_dtype = [np.int8, np.int8]
1003 else:
1004 numpy_dtype = [np.int8, np.int8, np.int32]
1005 tflite_inference_dtype = tf.int8
1006 elif quantized_inference_dtype == QuantType.CONV_I16_I8:
1007 assert (
1008 num_placeholders == 1
1009 ), "Unsupported number of placeholders for Convolution: {}".format(
1010 num_placeholders
1011 )
1012 if num_placeholders == 2:
1013 qzero = [0, 0]
1014 numpy_dtype = [np.int16, np.int8]
1015 else:
1016 qzero = [0, 0, 0]
1017 numpy_dtype = [
1018 np.int16,
1019 np.int8,
1020 np.int64,
1021 ] # np.int64 to represent 40 bits accumulator
1022 tflite_inference_dtype = tf.int16
1023 else:
1024 raise Exception(
1025 "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
1026 )
1027
1028 else:
1029 is_quantized = False
1030
1031 tf_model_filename = None
1032 tf_result_npy_filename = None
1033 tf_result_name = None
1034
1035 tflite_model_filename = None
1036 tflite_result_npy_filename = None
1037 tflite_result_name = None
1038
1039 placeholder_names = []
1040 placeholder_vals = []
1041 placeholder_signatures = ()
1042 placeholder_npy_filenames = []
1043 placeholder_shapes = []
1044
1045 for idx, (name, val) in enumerate(placeholders):
1046 placeholder_names.append(name)
1047 placeholder_signatures = placeholder_signatures + (
1048 tf.TensorSpec(shape=val.shape, dtype=val.dtype, name=name),
1049 )
1050 placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
1051 placeholder_shapes.append(val.shape)
1052
1053 # Get test builder class
1054 fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
1055 concrete_function = tf.function(input_signature=placeholder_signatures)(
1056 fcn_node.eval
1057 ).get_concrete_function()
1058
1059 if is_quantized:
1060
1061 assert dtype is tf.float32, "quantized test must come from float32 graph"
1062
            # 1. Quantize the float placeholder npy values to feed the graph
1064 for idx, (name, val) in enumerate(placeholders):
1065
1066 # we use np.amin()/np.amax() to determine dynamic range
1067 # for quantized test
1068 zeropoint = 0
1069 scale = 1.0
1070 if numpy_dtype[idx] != np.int64:
1071 qmin = np.iinfo(numpy_dtype[idx]).min
1072 qmax = np.iinfo(numpy_dtype[idx]).max
1073 num_bits = np.iinfo(numpy_dtype[idx]).bits
1074 # 40 bit is represented as np.int64
1075 else:
1076 num_bits = 40
1077 qmin = -(1 << num_bits)
1078 qmax = (1 << num_bits) - 1
1079
1080 min_val = np.amin(val)
1081 max_val = np.amax(val)
1082
                # for a single-value tensor, we set scale equal to abs(value),
                # and fix zeropoint to 128
                # if val > 0, it'll be represented as 129,
                #   where val = (129 - 128) * val
                # if val < 0, it'll be represented as 127,
                #   where val = (127 - 128) * (-val)
                # if val == 0, it'll be represented as 128, with range [-128.0, 128.0]
                # and let quantized 1 represent the value
                # also adjust the effective min/max accordingly
1092 if max_val == min_val:
1093 if max_val != 0:
1094 scale = abs(max_val)
1095 else:
1096 scale = 1.0
1097 min_val = float(qmin - qzero[idx]) * scale
1098 max_val = float(qmax - qzero[idx]) * scale
1099 else:
1100 scale = (max_val - min_val) / float(qmax - qmin)
1101 zeropoint = int(round((-min_val) / scale)) + qmin
1102
                # run through tf.fakequant first to ensure the quantization error is aligned
1104 fakequant_val = tf.quantization.fake_quant_with_min_max_args(
1105 val,
1106 min=min_val,
1107 max=max_val,
1108 num_bits=num_bits,
1109 name="gen_quant_npy",
1110 )
1111
1112 quant_val = np.round(fakequant_val / scale).astype(np.int32) + zeropoint
1113
                # in a few unit tests after the TF hash from May 2020, this quantized
                # value for some reason exceeds the [0, 255] range
1116 saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])
1117
                # save all quantized tensors as np.int32,
                # since the TOSA numpy C++ API only supports int32
1120 np.save(
1121 os.path.join(test_dir, placeholder_npy_filenames[idx]),
1122 saved_val.astype(np.int32),
1123 False,
1124 )
1125
1126 placeholder_vals.append(tf.convert_to_tensor(saved_val))
1127
1128 # 2. Convert the model to quantized TFLite flatbuffer
1129 module = tf.Module()
1130 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1131 [concrete_function], module
1132 )
1133 converter.optimizations = [tf.lite.Optimize.DEFAULT]
1134 converter.experimental_new_converter = True
1135
1136 # use MLIR-based post-quantizer
1137 converter.experimental_new_quantizer = True
1138
1139 flag = (
1140 tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 # noqa: E501
1141 )
1142 if tflite_inference_dtype == tf.int16:
1143 converter.target_spec.supported_ops = [flag]
1144
1145 def input_stats():
1146 for i in range(0, args.num_samples):
1147 a = [
1148 TGen.getRand(shape, tf.float32, rng)
1149 for shape in placeholder_shapes
1150 ]
1151 yield a
1152
1153 converter.representative_dataset = input_stats
1154 converter.inference_input_type = tflite_inference_dtype
1155 converter.inference_output_type = tflite_inference_dtype
1156
1157 tflite_model = converter.convert()
1158
1159 tflite_model_filename = "model.tflite"
1160
1161 # Write out converted model to disk
1162 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1163 f.write(tflite_model)
1164
1165 else: # is_quantized is False
1166
            # 1. Save out the numpy arrays directly
1168 for idx, (name, val) in enumerate(placeholders):
1169 placeholder_vals.append(tf.convert_to_tensor(val))
1170 np.save(
1171 os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
1172 )
1173
            # 2.a Save out .pb if the framework list includes tensorflow
1175 if "tf" not in excluded_framework_list:
1176 # Write out graph as protobuf to disk
1177 tf_model_filename = "model.pb"
1178 tf.io.write_graph(
1179 concrete_function.graph, test_dir, tf_model_filename, True
1180 )
1181
            # 2.b Save out .tflite if the framework list includes tflite
1183 if "tflite" not in excluded_framework_list:
1184 # Convert the model to TFLite flatbuffer
1185 module = tf.Module()
1186 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1187 [concrete_function], module
1188 )
1189
1190 converter.experimental_new_converter = True
1191
                # Even for a non-quantized int32 test, this needs to be set to tf.float32
1193 converter.inference_input_type = tf.float32
1194 converter.inference_output_type = tf.float32
1195 tflite_model = converter.convert()
1196
1197 # Write out converted model to disk
1198 tflite_model_filename = "model.tflite"
1199 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1200 f.write(tflite_model)
1201
1202 # Get TF reference result if .pb is specified
1203 if tf_model_filename:
1204 tf_result_npy_filename = "tf_result.npy"
1205 tf_result = concrete_function(*placeholder_vals)
1206 np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)
1207
1208 tf_result_name = result_name
1209
1210 # Get TFLite inference result if .tflite is specified
1211 if tflite_model_filename:
1212 tflite_result_npy_filename = "tflite_result.npy"
1213
            ops_with_optimized_only_kernel = ["elu", "ceil", "gather", "rfft2d"]

1216 if args.tflite_kernel_mode == "optimized" or (
1217 op_name in ops_with_optimized_only_kernel
1218 ):
1219 interpreter = tf.lite.Interpreter(
1220 model_path=os.path.join(test_dir, tflite_model_filename)
1221 )
1222 elif args.tflite_kernel_mode == "reference":
1223 interpreter = tf.lite.Interpreter(
1224 model_path=os.path.join(test_dir, tflite_model_filename),
1225 experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
1226 )
1227 else:
1228 assert 0, "unknown tflite interpreter mode {}".format(
1229 args.tflite_kernel_mode
1230 )
1231 interpreter.allocate_tensors()
1232
1233 input_details = interpreter.get_input_details()
1234 output_details = interpreter.get_output_details()
1235
1236 assert len(input_details) == len(
1237 placeholder_vals
            ), "number of placeholders mismatch"
1239
1240 for idx, val in enumerate(placeholder_vals):
1241 interpreter.set_tensor(input_details[idx]["index"], val.numpy())
1242
1243 interpreter.invoke()
1244 tflite_result = interpreter.get_tensor(output_details[0]["index"])
1245
1246 np.save(
1247 os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
1248 )
1249
            # The result tensor name changes after converting to a TFLite flatbuffer.
            # Overwrite the information from the TFLite model directly.
            # Assume a single result tensor for now.
1253 tflite_result_name = output_details[0]["name"]
1254
1255 # Write out test descriptor
1256 write_test_json(
1257 filename=os.path.join(test_dir, "test.json"),
1258 tf_model_filename=tf_model_filename,
1259 tf_result_npy_filename=tf_result_npy_filename,
1260 tf_result_name=tf_result_name,
1261 tflite_model_filename=tflite_model_filename,
1262 tflite_result_npy_filename=tflite_result_npy_filename,
1263 tflite_result_name=tflite_result_name,
1264 ifm_name=placeholder_names,
1265 ifm_file=placeholder_npy_filenames,
1266 ifm_shape=placeholder_shapes,
1267 framework_exclusions=excluded_framework_list,
1268 quantized=is_quantized,
1269 )
1270 except Exception as e:
1271 msg = "Error running task: {}".format(e)
1272 print(msg)
1273 print(
1274 "".join(
1275 traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
1276 )
1277 )
1278 return False
1279 return True
1280
1281
1282def build_const_net(
1283 args,
1284 curr_shape,
1285 op_name,
1286 dtype,
1287 excluded_framework_list,
1288 quantized_inference_dtype,
1289 result_name,
1290 seed,
1291 rng,
1292 filter,
1293 unit_test_args,
1294):
1295
1296 if quantized_inference_dtype:
1297 quant_dtype = get_tf_dtype(quantized_inference_dtype)
1298 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
1299 else:
1300 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
1301 test_dir = os.path.join(args.output_dir, test_dir)
1302
1303 # If the operator has an additional function to generate arguments, call it
1304 # here and iterate through the argument list that it generates
1305 op = TF_OP_LIST[op_name]
1306 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
1307
    try:
        rank_lo, rank_hi = op["rank"]
    except KeyError:
        # Set the testing rank to (1, 4) by default.
        rank_lo = 1
        rank_hi = 4
1314
1315 if len(curr_shape) not in range(rank_lo, rank_hi + 1):
1316 return
1317
    addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
1319 for desc, addl_args in addl_args_tuple:
        # Only filter on the full test_name, not the output directory
        _, test_name = os.path.split(test_dir + desc)
        if not filter or filter.search(test_name):
            unit_test_args.append(
1324 [
1325 op_name,
1326 args,
1327 test_dir + desc,
1328 curr_shape,
1329 addl_args,
1330 dtype,
1331 excluded_framework_list,
1332 quantized_inference_dtype,
1333 result_name,
1334 seed,
1335 ]
1336 )
1337
1338
# Python's built-in hash is not reproducible across runs, so create a hash for our purposes
1340def op_name_hash(op_name):
1341 result = 0xDEADBEEF
1342 for ch in op_name:
1343 if result & 1:
1344 result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
1345 else:
1346 result = (ord(ch) << 24) ^ (result >> 1)
1347
1348 return result
1349
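# Sketch of how the hash above feeds the per-operator seed (this mirrors the
# expression used in generate_op_tests below; 42 stands in for args.random_seed):
#
#   bounded_hash_val = (42 + op_name_hash("add")) % np.iinfo(np.int32).max
#   rng = np.random.default_rng(bounded_hash_val)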
1350
1351def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):
1352
1353 if not args.quiet:
1354 print(
1355 "Generating tests for {} ".format(
1356 op_name
1357 )
1358 )
1359
1360 op = TF_OP_LIST[op_name]
1361
1362 # Seed the RNG so that we get the same random tests for each test each time
1363 # If the number of tests for a given generation function changes, the tests
1364 # for that operator may also change accordingly, but this will at least keep
1365 # down churn across operators.
1366
1367 bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
1368 np.int32
1369 ).max
1370 rng = np.random.default_rng(bounded_hash_val)
1371
    # this is a dictionary with 'tf' and 'tflite' as keys,
    # and the values are the data types we want to test under each framework
1374
1375 if isinstance(op["types"], dict):
1376 try:
1377 tf_dtypes = op["types"]["tf"]
1378 except KeyError:
1379 tf_dtypes = []
1380 try:
1381 tflite_dtypes = op["types"]["tflite"]
1382 except KeyError:
1383 tflite_dtypes = []
1384 elif isinstance(op["types"], list):
1385 tf_dtypes = op["types"]
1386 tflite_dtypes = op["types"]
1387
1388 tf_nonquantized_dtypes = tf_dtypes # tf doesn't support quantized data types
1389 tflite_quantized_dtypes = []
1390 tflite_nonquantized_dtypes = []
1391 for dtype in tflite_dtypes:
1392 if isinstance(dtype, QuantType):
1393 tflite_quantized_dtypes.append(dtype)
1394 else:
1395 tflite_nonquantized_dtypes.append(dtype)
1396
1397 nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
1398 set(tflite_nonquantized_dtypes)
1399 )
1400 nonquantized_dtypes = list(nonquantized_dtypes_set)
1401 quantized_dtypes = tflite_quantized_dtypes
1402
1403 # populate non quantized unit test arguments
1404 for dtype in nonquantized_dtypes:
1405
1406 excluded_framework_set = set(ALL_FRAMEWORKS)
1407 if dtype in tf_nonquantized_dtypes:
1408 excluded_framework_set.remove("tf")
1409 if dtype in tflite_nonquantized_dtypes:
1410 excluded_framework_set.remove("tflite")
1411 excluded_framework_list = list(excluded_framework_set)
1412
1413 for curr_shape in shape_list:
1414 build_const_net(
1415 args,
1416 curr_shape,
1417 op_name,
1418 dtype,
1419 excluded_framework_list,
1420 None,
1421 result_name,
1422 bounded_hash_val,
1423 rng,
1424 filter,
1425 unit_test_args,
1426 )
1427
1428 # populate quantized unit test arguments
    # must exclude 'tf', and the source dtype must be tf.float32
1430 for dtype in quantized_dtypes:
1431 for curr_shape in shape_list:
1432 build_const_net(
1433 args,
1434 curr_shape,
1435 op_name,
1436 tf.float32,
1437 ["tf"],
1438 dtype,
1439 result_name,
1440 bounded_hash_val,
1441 rng,
1442 filter,
1443 unit_test_args,
1444 )
1445
1446 return unit_test_args
1447
1448
1449def createDynamicOpLists():
1450 """The templated operators are conv2d-style operators with a number of kernel
1451 sizes. Since the operator is unchanged, we generate the range of kernel
1452 sizes here in this loop and remove the original templates from the list.
1453
1454 This could be expanded to non-conv2d-style operators in the future."""
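    # For example, the "conv2d" template combined with the KERNELS list below
    # expands (per the format strings used in the loops) into entries named
    # conv2d_1x1, conv2d_3x3, and conv2d_5x5, while the 3D templates expand into
    # names such as conv3d_1x1x1 and conv3d_2x3x3.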
1455
1456 # Dynamically create op lists for convolutions with a list of kernel sizes
1457 KERNELS = [
1458 [1, 1],
1459 [3, 3],
1460 [5, 5],
1461 ]
1462
    # dim = [D, H, W]
1464 KERNELS_3D = [
1465 [1, 1, 1],
1466 [2, 3, 3],
1467 [3, 5, 5],
1468 ]
1469
    TEMPLATE_LIST = [
1471 "conv2d",
1472 "conv2d_bias",
1473 "conv2d_relu",
1474 "conv2d_relu6",
1475 "conv2d_relu_n1_to_1",
1476 "conv2d_tanh",
1477 "depthwise_conv2d",
1478 "depthwise_conv2d_bias",
1479 "transpose_conv2d",
1480 ]
1481
    TEMPLATE_LIST_CONV3D = [
1483 "conv3d",
1484 "conv3d_bias",
1485 ]
1486
    for t in TEMPLATE_LIST:
1488 for k in KERNELS:
1489 testName = "{}_{}x{}".format(t, k[0], k[1])
1490 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1491 TF_OP_LIST[testName]["filter"] = k
1492 TF_OP_LIST[testName]["template"] = False
1493
    # The existing 2D operators don't support kernels with more than 2 dimensions.
    for t in TEMPLATE_LIST_CONV3D:
1496 for k in KERNELS_3D:
1497 testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
1498 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1499 TF_OP_LIST[testName]["filter"] = k
1500 TF_OP_LIST[testName]["template"] = False
1501
    # Delete any templates after having created any dynamic ops
1503 # This is a two-pass operation because it's bad practice to delete
1504 # keys from dictionaries while iterating
1505 keyList = []
1506 for k in TF_OP_LIST:
1507 try:
1508 if TF_OP_LIST[k]["template"]:
1509 keyList.append(k)
1510 continue
1511 except KeyError:
1512 pass
1513
1514 for k in keyList:
1515 del TF_OP_LIST[k]
1516
1517
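# Example invocation (illustrative; the script filename is assumed here):
#
#   ./tosa_verif_framework_generator.py -o ./framework_tests --filter "conv2d" -j 8
#
# This would generate only conv2d-related tests into ./framework_tests. Note that
# -j/--jobs is parsed, but the unit tests below are currently run serially in main().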
1518def main():
1519 parser = argparse.ArgumentParser()
1520 parser.add_argument(
1521 "--seed", dest="random_seed", default=42, type=int, help="Random seed"
1522 )
1523 parser.add_argument(
1524 "--random-shapes",
1525 dest="random_shapes",
1526 default=0,
1527 type=int,
1528 help=(
            "Use N random shapes of each rank for generating tests, "
            "seeded with random seed"
1531 ),
1532 )
1533 parser.add_argument(
1534 "-o",
1535 "--output-dir",
1536 dest="output_dir",
1537 default=".",
1538 type=str,
1539 help="Test output directory path prefix",
1540 )
1541 parser.add_argument(
1542 "-q",
1543 "--quiet",
1544 dest="quiet",
1545 default=False,
1546 action="store_true",
1547 help="Do not print test names",
1548 )
1549 parser.add_argument(
1550 "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
1551 )
1552 parser.add_argument(
1553 "-m",
1554 "--tflite-kernel-mode",
1555 dest="tflite_kernel_mode",
1556 type=str,
1557 choices=["reference", "optimized"],
1558 default="reference",
1559 help="TFLite interpreter kernel mode",
1560 )
1561 parser.add_argument(
1562 "--num-samples",
1563 dest="num_samples",
1564 default=200,
1565 type=int,
1566 help="Number of input samples for post-training quantization",
1567 )
1568 parser.add_argument(
1569 "--filter",
1570 dest="filter",
1571 default="",
1572 type=str,
1573 help="Filter test names by this expression",
1574 )
1575 args = parser.parse_args()
1576
1577 # Turn the filter into a re object if present
1578 filter = None
1579 if args.filter != "":
1580 filter = re.compile(args.filter)
1581
1582 # Autodetect CPU count
1583 if args.jobs <= 0:
1584 args.jobs = os.cpu_count()
1585
1586 # Disable TF info messages
1587 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
1588
1589 try:
1590 os.makedirs(args.output_dir)
1591 except FileExistsError:
1592 pass
1593
1594 if args.random_shapes:
1595 gen_rand_shapes(args)
1596
1597 # Build dynamic ops
1598 createDynamicOpLists()
1599
1600 # Generate the test list and arguments to run_unit_test()
1601 unit_test_args = []
1602
1603 for op in TF_OP_LIST:
1604 generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)
1605
1606 errors = 0
1607 for t in unit_test_args:
1608 if not run_unit_test(*t):
1609 errors = errors + 1
1610
1611 if not args.quiet:
1612 print("\nAll tasks done - with {} errors".format(errors))
1613
1614 return 1 if errors else 0
1615
1616
1617if __name__ == "__main__":
1618 exit(main())