1#!/usr/bin/env python3
2# Copyright (c) 2020-2023, ARM Limited.
3# SPDX-License-Identifier: Apache-2.0
4import argparse
5import os
6import re
7import traceback
8
9import numpy as np
10
11# Level | Level for Humans | Level Description
12# -------|------------------|------------------------------------
13# 0 | DEBUG | [Default] Print all messages
14# 1 | INFO | Filter out INFO messages
15# 2 | WARNING | Filter out INFO & WARNING messages
16# 3 | ERROR | Filter out all messages
17# Filter out TensorFlow debug messages except errors
18os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
19
20# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
21import tensorflow as tf # noqa: E402
22from frameworks.write_test_json import write_test_json # noqa: E402
23from frameworks.arg_gen import ArgGen # noqa: E402
24from frameworks.tensor_gen import TGen # noqa: E402
25from frameworks.test_builder import TBuilder # noqa: E402
26from frameworks.test_gen_utils import ( # noqa: E402
27    QuantType,
28 get_tf_dtype,
29 get_shape_str,
30) # noqa: E402
31from tensorflow.lite.python.interpreter import OpResolverType # noqa: E402
32
33# All of the supported frameworks
34ALL_FRAMEWORKS = ["tf", "tflite"]
35
36# Lists of different data types
37TYPE_F = [tf.float32]
38TYPE_I = [tf.int32]
39TYPE_FI = [tf.float32, tf.int32]
40TYPE_B = [tf.bool]
41TYPE_FIB = [tf.float32, tf.int32, tf.bool]
42TYPE_H = [tf.float16]
43TYPE_FH = [tf.float32, tf.float16]
44TYPE_FHI = [tf.float32, tf.float16, tf.int32]
45TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]
46
47# The list of operator tests
48# Each dictionary entry for an op is a dictionary with the following required members:
49# 'operands': tuple (number_of_placeholder_tensors, number_of_constant_tensors)
50# 'build_fcn': tuple (Test builder function, Tensor generator function,
51# Argument generator function)
52# 'types': list of Tensorflow types that should be tested for this op
53# OR
54# a dictionary of {'framework_name': [type_list] } for cases where only
55# a subset of the types should be tested in each framework. This can also
56# be used to restrict an operator to a particular framework.
57#
58# And optional members:
59# 'template': boolean (indicates that this is a templated op which gets further
60# processing in createDynamicOpLists)
61# 'bias': boolean indicating that there is a bias component to be generated
62# 'qtypes': List of QuantType quantized types to generate for this op
63# 'rank': tuple (lowest rank, highest rank). Dimension range of input tensor.
64
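# As a quick illustration of the structure above, a minimal (purely
# hypothetical) entry could look like the sketch below; "ExampleOp" is a
# made-up builder name, the real entries follow in TF_OP_LIST:
#
#     "example_op": {
#         "operands": (1, 0),
#         "build_fcn": (TBuilder.ExampleOp, TGen.tgBasic, ArgGen.agNone),
#         "types": {"tf": TYPE_F, "tflite": TYPE_F},
#     },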
65TF_OP_LIST = {
66 "add": {
67 "operands": (2, 0),
68 "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
69 "types": {
70 "tf": TYPE_FI,
71 "tflite": list(
72 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
73 ),
74 },
75 },
76 "sub": {
77 "operands": (2, 0),
78 "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
79 "types": {
80 "tf": TYPE_FI,
81 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
82            # QuantType.ALL_I16 fails in TFLite conversion
83 },
84 },
85 "mul": {
86 "operands": (2, 0),
87 "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
88 "types": {
89 "tf": TYPE_FI,
90 "tflite": list(
91 TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
92 ),
93 },
94 },
95 "exp": {
96 "operands": (1, 0),
97 "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
98 "types": TYPE_F,
99 },
100 "rcp": {
101 "operands": (1, 0),
102 "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
103 "types": TYPE_F,
104 },
105 "relu": {
106 "operands": (1, 0),
107 "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
108 "types": {
109 "tf": TYPE_F,
110 "tflite": list(
111 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
112 ),
113 },
114 },
115    "relu1": {
116 "operands": (1, 0),
117 "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
118 "types": {
119 "tf": [],
120 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
121 },
122 },
123    "relu0To1": {
124 "operands": (1, 0),
125 "build_fcn": (TBuilder.Relu0To1, TGen.tgBasic, ArgGen.agNone),
126 "types": {
127 "tf": [],
128 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
129 },
130 },
131    "relu6": {
132 "operands": (1, 0),
133 "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
134 "types": {
135 "tf": TYPE_F,
136 "tflite": list(
137 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
138 ),
139 },
140 },
141 "leaky_relu": {
142 "operands": (1, 0),
143 "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
144 "types": {
145 "tf": TYPE_F,
146 "tflite": list(
147 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
148 ),
149 },
150 },
151    "prelu": {
152 "operands": (1, 0),
153 "build_fcn": (TBuilder.Prelu, TGen.tgBasic, ArgGen.agNone),
154 "types": {
155 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
156 },
157 },
158    "gelu": {
159 "operands": (1, 0),
160 "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
161 "types": {
162 # Need compiler support for tf.Erf.
163 # "tf": TYPE_F,
164 "tflite": list(
165 # Only float32, int8 and uint8 supported currently
166 TYPE_F
167 + [QuantType.ALL_U8, QuantType.ALL_I8]
168 ),
169 },
170 },
171    "concat": {
172 "operands": (2, 0),
173 "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
174 "types": TYPE_FI,
175 },
176 "bitwise_and": {
177 "operands": (2, 0),
178 "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
179 "types": {"tf": TYPE_I}, # Not supported in TF Lite
180 },
181 "bitwise_or": {
182 "operands": (2, 0),
183 "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
184 "types": {"tf": TYPE_I}, # Not supported in TF Lite
185 },
186 "bitwise_not": {
187 "operands": (1, 0),
188 "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
189 "types": {"tf": TYPE_I}, # Not supported in TF Lite
190 },
191 "bitwise_xor": {
192 "operands": (2, 0),
193 "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
194 "types": {"tf": TYPE_I}, # Not supported in TF Lite
195 },
196 "logical_and": {
197 "operands": (2, 0),
198 "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
199 "types": TYPE_B,
200 },
201 "logical_or": {
202 "operands": (2, 0),
203 "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
204 "types": TYPE_B,
205 },
206 "logical_not": {
207 "operands": (1, 0),
208 "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
209 "types": TYPE_B,
210 },
211 "reduce_any": {
212 "operands": (1, 0),
213 "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
214 "types": TYPE_B,
215 },
216 "reduce_all": {
217 "operands": (1, 0),
218 "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
219        "types": TYPE_B,
220    },
221 "reduce_min": {
222 "operands": (1, 0),
223 "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
224 "types": {
225 "tf": TYPE_FI,
226 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
227 },
228 },
229 "reduce_max": {
230 "operands": (1, 0),
231 "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
232 "types": {
233 "tf": TYPE_FI,
234 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
235 },
236 },
237 "reduce_sum": {
238 "operands": (1, 0),
239 "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
240 "types": {
241 "tf": TYPE_F,
242 # v2 converter doesn't recognize quantized reduce_sum
243 # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
244 "tflite": TYPE_F,
245 },
246 },
247 "reduce_mean": {
248 "operands": (1, 0),
249 "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
250 "types": {
251 "tf": TYPE_F,
252 "tflite": list(
253 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
254 ),
255 },
256 },
257 "reduce_product": {
258 "operands": (1, 0),
259 "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
260 "types": TYPE_F,
261 },
262 "min": {
263 "operands": (2, 0),
264 "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
265 "types": TYPE_FI,
266 },
267 "max": {
268 "operands": (2, 0),
269 "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
270 "types": TYPE_FI,
271 },
272 "pow": {
273 "operands": (2, 0),
274 "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
275 # Technically, integer is supported, but only for positive exponents.
276 # Needs a random argument generator.
277 "types": TYPE_F,
278 },
279 "abs": {
280 "operands": (1, 0),
281 "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
282 "types": TYPE_F,
283 },
284 "ceil": {
285 "operands": (1, 0),
286 "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
287 "types": TYPE_F,
288 },
289 "floor": {
290 "operands": (1, 0),
291 "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
292 "types": TYPE_F,
293 },
294 "log": {
295 "operands": (1, 0),
296 "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
297 "types": TYPE_F,
298 },
299 "negate": {
300 "operands": (1, 0),
301 "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
302 "types": TYPE_F,
303 },
304 "rsqrt": {
305 "operands": (1, 0),
306 "build_fcn": (TBuilder.Rsqrt, TGen.tgBasic, ArgGen.agNone),
307 "types": TYPE_F,
308 },
309    "sign": {
310 "operands": (1, 0),
311 "build_fcn": (TBuilder.Sign, TGen.tgBasic, ArgGen.agNone),
312 "types": {
313 "tf": TYPE_F,
314 },
315 },
316    "sigmoid": {
317 "operands": (1, 0),
318 "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
319 "types": {
320 "tf": TYPE_F,
321 "tflite": list(
322 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
323 ),
324 },
325 },
326 "tanh": {
327 "operands": (1, 0),
328 "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
329 "types": {
330 "tf": TYPE_F,
331 "tflite": list(
332 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
333 ),
334 },
335 },
336    "sin": {
337 "operands": (1, 0),
338 "build_fcn": (TBuilder.Sin, TGen.tgBasic, ArgGen.agNone),
339        "types": TYPE_F,
340    },
341 "cos": {
342 "operands": (1, 0),
343 "build_fcn": (TBuilder.Cos, TGen.tgBasic, ArgGen.agNone),
344        "types": TYPE_F,
345    },
346    "atan2": {
347 "operands": (2, 0),
348 "build_fcn": (TBuilder.Atan2, TGen.tgBasic, ArgGen.agNone),
349 "types": {
350 "tflite": TYPE_F,
351 },
352 },
353    "square": {
354 "operands": (1, 0),
355 "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
356 "types": TYPE_F,
357 },
358 "squared_difference": {
359 "operands": (2, 0),
360 "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
361 "types": TYPE_F,
362 },
363 "equal": {
364 "operands": (2, 0),
365 "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
366 "types": TYPE_FI,
367 },
368 "greater_equal": {
369 "operands": (2, 0),
370 "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
371 "types": TYPE_FI,
372 },
373 "greater": {
374 "operands": (2, 0),
375 "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
376 "types": TYPE_FI,
377 },
378 "less": {
379 "operands": (2, 0),
380 "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
381 "types": TYPE_FI,
382 },
383 "less_equal": {
384 "operands": (2, 0),
385 "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
386 "types": TYPE_FI,
387 },
388 "conv2d_TEMPLATE": {
389 "operands": (1, 1),
390 "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
391 "types": {
392 "tf": [tf.float32],
393 "tflite": [
394 tf.float32,
395 QuantType.CONV_U8_U8,
396 QuantType.CONV_I8_I8,
397 QuantType.CONV_I16_I8,
398 ],
399 },
400 "template": True,
401 },
402 "conv2d_relu_TEMPLATE": {
403 "operands": (1, 2),
404 "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
405 "types": {
406 "tf": [tf.float32],
407 "tflite": [
408 tf.float32,
409 QuantType.CONV_U8_U8,
410 QuantType.CONV_I8_I8,
411 QuantType.CONV_I16_I8,
412 ],
413 },
414 "template": True,
415 },
416 "conv2d_relu6_TEMPLATE": {
417 "operands": (1, 2),
418 "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
419 "types": {
420 "tf": [tf.float32],
421 "tflite": [
422 tf.float32,
423 QuantType.CONV_U8_U8,
424 QuantType.CONV_I8_I8,
425 QuantType.CONV_I16_I8,
426 ],
427 },
428 "template": True,
429 },
430 "conv2d_relu_n1_to_1_TEMPLATE": {
431 "operands": (1, 2),
432 "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
433 "types": {
434 "tf": [tf.float32],
435 "tflite": [
436 tf.float32,
437 QuantType.CONV_U8_U8,
438 QuantType.CONV_I8_I8,
439 QuantType.CONV_I16_I8,
440 ],
441 },
442 "template": True,
443 },
444 # This test is converted as:
445 # tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
446    # TODO: any way to generate tfl.conv2d(){fused_activation_function="TANH"}?
447 "conv2d_tanh_TEMPLATE": {
448 "operands": (1, 2),
449 "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
450 "types": {
451 "tf": [tf.float32],
452 "tflite": [
453 tf.float32,
454 QuantType.CONV_U8_U8,
455 QuantType.CONV_I8_I8,
456 QuantType.CONV_I16_I8,
457 ],
458 },
459 "template": True,
460 },
461 "conv2d_bias_TEMPLATE": {
462 "operands": (1, 2),
463 "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
464 "types": {
465 "tf": [tf.float32],
466 "tflite": [
467 tf.float32,
468 QuantType.CONV_U8_U8,
469 QuantType.CONV_I8_I8,
470 QuantType.CONV_I16_I8,
471 ],
472 },
473 "bias": True,
474 "template": True,
475 },
476    "conv3d_TEMPLATE": {
477 "operands": (1, 1),
478 "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
479 "types": {
480 "tf": [tf.float32],
481 "tflite": [
482 tf.float32,
483 QuantType.CONV_U8_U8,
484 QuantType.CONV_I8_I8,
485 # Quantization to 16x8-bit not yet supported by tflite.
486 ],
487 },
488 "template": True,
489 "rank": (1, 5),
490 },
491 "conv3d_bias_TEMPLATE": {
492 "operands": (1, 2),
493 "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
494 "types": {
495 "tf": [tf.float32],
496 "tflite": [
497 tf.float32,
498 QuantType.CONV_U8_U8,
499 QuantType.CONV_I8_I8,
500 # Quantization to 16x8-bit not yet supported by tflite.
501 ],
502 },
503 "bias": True,
504 "template": True,
505 "rank": (1, 5),
506 },
507    "depthwise_conv2d_TEMPLATE": {
508 "operands": (1, 1),
509 "build_fcn": (
510 TBuilder.DepthwiseConv2d,
511 TGen.tgDepthwiseConv2d,
512 ArgGen.agDepthwiseConv2d,
513 ),
514 "types": {
515 "tf": [tf.float32],
516 "tflite": [
517 tf.float32,
518 QuantType.CONV_U8_U8,
519 QuantType.CONV_I8_I8,
520 QuantType.CONV_I16_I8,
521 ],
522 },
523 "template": True,
524 },
525 "depthwise_conv2d_bias_TEMPLATE": {
526 "operands": (1, 2),
527 "build_fcn": (
528 TBuilder.DepthwiseConv2dWithBias,
529 TGen.tgDepthwiseConv2d,
530 ArgGen.agDepthwiseConv2d,
531 ),
532 "types": {
533 "tf": [tf.float32],
534 "tflite": [
535 tf.float32,
536 QuantType.CONV_U8_U8,
537 QuantType.CONV_I8_I8,
538 QuantType.CONV_I16_I8,
539 ],
540 },
541 "bias": True,
542 "template": True,
543 },
544 "transpose_conv2d_TEMPLATE": {
545 "operands": (1, 1),
546 "build_fcn": (
547 TBuilder.TransposeConv2d,
548 TGen.tgTransposeConv2d,
549 ArgGen.agTransposeConv2d,
550 ),
551 "types": {
552 "tf": [tf.float32],
553 "tflite": [
554 tf.float32,
555 QuantType.CONV_U8_U8,
556 QuantType.CONV_I8_I8,
557 QuantType.CONV_I16_I8,
558 ],
559 },
560 "template": True,
561 },
562 "argmax": {
563 "operands": (1, 0),
564 "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
565 "types": {"tf": TYPE_F},
566 },
567 "avg_pool2d": {
568 "operands": (1, 0),
569 "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
570 "types": {
571 "tf": TYPE_F,
572 "tflite": list(
573 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
574 ),
575 },
576 },
577 "max_pool2d": {
578 "operands": (1, 0),
579 "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
580 "types": {
581 "tf": TYPE_F,
582 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
583 # ALL_I16 not supported yet
584 # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
585 # QI16 is missing from MaxPoolOperandAndResultConstraints
586 # If adding QI16 back this test can run through.
587 },
588 },
589 "reshape": {
590 "operands": (1, 0),
591 "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
592 "types": TYPE_FI,
593 },
594 "transpose": {
595 "operands": (1, 0),
596 "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
597 "types": TYPE_FI,
598 },
599 "slice": {
600 "operands": (1, 0),
601 "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
602 "types": TYPE_FI,
603 },
604 "strided_slice": {
605 "operands": (1, 0),
606 "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
607 "types": TYPE_FI,
608 },
609 "select": {
610 "operands": (3, 0),
611 "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
612 "types": TYPE_FI,
613 },
614 "addn": {
615 "operands": (4, 0),
616 "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
617 "types": TYPE_FI,
618 },
619 "concatv2": {
620 "operands": (4, 0),
621 "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
622 "types": TYPE_FI,
623 },
624 "stack": {
625 "operands": (4, 0),
626 "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
627 "types": TYPE_FI,
628 },
629 "unstack": {
630 "operands": (1, 0),
631 "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
632 "types": TYPE_F,
633 },
634    "mirrorpad": {
635 "operands": (1, 0),
636 "build_fcn": (TBuilder.MirrorPad, TGen.tgBasic, ArgGen.agMirrorPad),
637 "types": TYPE_FI,
638 },
639    "pad": {
640 "operands": (1, 0),
641 "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
642        "types": {
643 "tf": TYPE_F,
644 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
645 },
646    },
647 "expand_dims": {
648 "operands": (1, 0),
649 "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
650 "types": TYPE_FI,
651 },
652 "shape": {
653 "operands": (1, 0),
654 "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
655 "types": TYPE_FI,
656 },
657 "rank": {
658 "operands": (1, 0),
659 "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
660 "types": TYPE_FI,
661 },
662 "fill": {
663 "operands": (1, 0),
664 "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
665 "types": TYPE_FI,
666 },
667 "elu": {
668 "operands": (1, 0),
669 "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
670 "types": TYPE_F,
671 },
672 "softmax": {
673 "operands": (1, 0),
674 "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
675 "types": {
676 "tf": TYPE_F,
677 "tflite": list(
678 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
679 ),
680 },
681 },
682 "log_softmax": {
683 "operands": (1, 0),
684 "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
685 "types": TYPE_F,
686 },
687 "matmul": {
688 "operands": (2, 0),
689 "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
690 "types": {
691 "tf": TYPE_F,
692 "tflite": list(
693 TYPE_F
694 + [QuantType.ALL_U8, QuantType.ALL_I8]
695                # 16-bit matmul fails to convert
696 ),
697 },
698 },
699 "add_scalar": {
700 "operands": (1, 0),
701 "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
702 "types": TYPE_F,
703 },
704 "add_1d": {
705 "operands": (2, 0),
706 "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
707 "types": TYPE_F,
708 },
709 "split": {
710 "operands": (1, 0),
711 "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
712 "types": TYPE_FI,
713 },
714 "tile": {
715 "operands": (1, 0),
716 "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
717 "types": TYPE_FI,
718 },
719 "reverse": {
720 "operands": (1, 0),
721 "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
722 "types": {"tf": TYPE_FI},
723 },
724 "gather": {
725 "operands": (1, 0),
726 "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
727 "types": TYPE_FI,
728 },
729 "gather_nd": {
730 "operands": (1, 0),
731 "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
732 "types": TYPE_FI,
733 },
734 "scatter_nd": {
735 "operands": (1, 0),
736 "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
737 "types": TYPE_FI,
738 },
739 "space_to_batch": {
740 "operands": (1, 0),
741 "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
742 "types": TYPE_F,
743 },
744 "batch_to_space": {
745 "operands": (1, 0),
746 "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
747 "types": TYPE_F,
748 },
749 "space_to_depth": {
750 "operands": (1, 0),
751 "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
752 "types": TYPE_F,
753 },
754 "depth_to_space": {
755 "operands": (1, 0),
756 "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
757 "types": TYPE_F,
758 },
759 "one_hot": {
760 "operands": (3, 1),
761 "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
762 "types": TYPE_FI,
763 },
764 "fakequant": {
765 "operands": (1, 0),
766 "build_fcn": (
767 TBuilder.Fakequant,
768 TGen.tgBasic,
769 ArgGen.agFakequant,
770 ),
771 "types": {"tf": TYPE_F},
772 },
773    "resize": {
774        "operands": (1, 0),
775        "build_fcn": (TBuilder.Resize, TGen.tgPooling, ArgGen.agResize),
776        "types": {
777 "tf": TYPE_F,
778 "tflite": list(
779 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
780 ),
781 },
782 },
783    "left_shift": {
784 "operands": (1, 0),
785 "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
786 "types": {"tf": [tf.int32]},
787 },
788 "right_shift": {
789 "operands": (1, 0),
790 "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
791 "types": {
792 "tf": [
793 tf.int32,
794 ]
795 },
796 },
797    "while": {
798 "operands": (1, 0),
799 "build_fcn": (TBuilder.While, TGen.tgBasic, ArgGen.agNone),
800 "types": {
801 "tflite": list(TYPE_F),
802 },
803 },
804 "lstm": {
805 "operands": (1, 0),
806 "build_fcn": (TBuilder.LSTM, TGen.tgRecurrent, ArgGen.agNone),
807 "types": {
808 "tflite": [
809 tf.float32,
810 # tf.int32
811 ]
812 },
813 },
814 "gru": {
815 "operands": (1, 0),
816 "build_fcn": (TBuilder.GRU, TGen.tgRecurrent, ArgGen.agNone),
817 "types": {
818 "tflite": [
819 tf.float32,
820 # tf.int32
821 ]
822 },
823 },
824 "rnn": {
825 "operands": (1, 0),
826 "build_fcn": (TBuilder.RNN, TGen.tgRecurrent, ArgGen.agNone),
827 "types": {
828 "tflite": [
829 tf.float32,
830 ]
831 },
832 },
833    "rfft2d": {
834 "operands": (1, 0),
835 "build_fcn": (TBuilder.RFFT2d, TGen.tgRFFT2d, ArgGen.agRFFT2d),
836 "types": {
837 "tflite": TYPE_F,
838 },
839 },
840}
841
842# Shapes to be tested; default can be overwritten
843shape_list = [
844 (1,),
845 (64,),
846 (14, 19),
847 (13, 21, 3),
848    (1, 8, 16),
849    (1, 4, 4, 4),
850 (1, 8, 4, 17),
851 (1, 4, 8, 19),
852 (1, 32, 32, 8),
853 (1, 7, 7, 9),
854    (1, 7, 7, 479),
855    (3, 1, 1, 7),
856    (2, 2, 7, 7, 2),
857 (1, 4, 8, 21, 17),
858 (3, 32, 16, 16, 5),
859]
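# Note: the default shapes above cover ranks 1 through 5; an operator only
# sees the ranks allowed by its optional 'rank' entry in TF_OP_LIST
# (defaulting to ranks 1-4, see build_const_net below).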
860
861
862def gen_rand_shapes(args):
863 """Overwrite the global shape list with a new list of random shapes"""
864 global shape_list
865
866 rng = np.random.default_rng(args.random_seed)
867
868 # Don't let things get too big... cap the maximum volume, but let
869 # an individual dimension be 1..47
870 max_total_volume = 32 * 32 * 4
871
872 shape_list = []
873    # Only iterate over ranks 2, 3, 4, and 5
874 for rank in range(2, 6):
875        for n in range(args.random_shapes):
876 new_shape = rng.integers(1, 48, size=rank)
877
878            # Set the batch dimension on 4D or 5D objects to 1
879 if rank == 4 or rank == 5:
880                new_shape[0] = 1
881
882 # Limit the total shape volume and throw out any
883 # shapes that wouldn't leave at least size=2 in some non-batch dimension
884 volume = 1
885 skip_shape = False
886 for i in range(rank):
887
888 volume *= new_shape[i]
889
890 # Reduce the shape, while it's larger than the maximum volume
891 while volume > max_total_volume:
892 new_shape[i] = new_shape[i] // 2
893 volume = volume // 2
894
895 # Now an untenable dimension size? Skip this one.
896 if new_shape[i] < 1:
897 skip_shape = True
898
899 if not skip_shape:
900 shape_list.append(tuple(new_shape))
901
902
903# Construct, run and save a whole TensorFlow tf.function to a protobuf file,
904# or convert it to .tflite if it's a quantized unit test
905def run_unit_test(
906 op_name,
907 args,
908 test_dir,
909 curr_shape,
910 addl_args,
911 dtype,
912 excluded_framework_list,
913 quantized_inference_dtype,
914 result_name,
915 seed,
916):
917
918 try:
919 op = TF_OP_LIST[op_name]
920 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
921
922 # Get and seed a random number generator for this test
923 rng = np.random.default_rng(seed)
924
925 # return placeholders=(str: name, np.array: value)
926 # consts=(str: name, np.array: value)
927 placeholders, consts = tensor_gen_fcn(op, curr_shape, dtype, rng)
928
929        # if the test doesn't have any placeholders/consts, terminate early
930 if len(placeholders) == 0 and len(consts) == 0:
931 return True
932
933 if not args.quiet:
934 print(" {} ".format(test_dir))
935
936 try:
937 os.mkdir(test_dir)
938 except FileExistsError:
939 pass
940
941 const_nodes = [value for name, value in consts]
942
943 num_placeholders = len(placeholders)
944        # if the test is quantized, create tensor quantization metadata for
945        # each input tensor, based on the quantized type
946 if quantized_inference_dtype:
947 is_quantized = True
948 # TODO: support INT8 IFM x INT4 weight later
949 if quantized_inference_dtype == QuantType.ALL_U8:
950 qzero = [128] * num_placeholders
951 numpy_dtype = [np.uint8] * num_placeholders
952 tflite_inference_dtype = tf.uint8
953 elif quantized_inference_dtype == QuantType.ALL_I8:
954 qzero = [0] * num_placeholders
955 numpy_dtype = [np.int8] * num_placeholders
956 tflite_inference_dtype = tf.int8
957 elif quantized_inference_dtype == QuantType.ALL_I16:
958 qzero = [0] * num_placeholders
959 numpy_dtype = [np.int16] * num_placeholders
960 tflite_inference_dtype = tf.int16
961 elif quantized_inference_dtype == QuantType.CONV_U8_U8:
962 assert (
963 num_placeholders == 1
964 ), "Unsupported number of placeholders for Convolution: {}".format(
965 num_placeholders
966 )
967 qzero = [128] * num_placeholders
968 if num_placeholders == 2:
969 numpy_dtype = [np.uint8, np.uint8]
970 else:
971 numpy_dtype = [np.uint8, np.uint8, np.int32]
972 tflite_inference_dtype = tf.uint8
973 elif quantized_inference_dtype == QuantType.CONV_I8_I8:
974 assert (
975 num_placeholders == 1
976 ), "Unsupported number of placeholders for Convolution: {}".format(
977 num_placeholders
978 )
979 qzero = [0] * num_placeholders
980 if num_placeholders == 2:
981 numpy_dtype = [np.int8, np.int8]
982 else:
983 numpy_dtype = [np.int8, np.int8, np.int32]
984 tflite_inference_dtype = tf.int8
985 elif quantized_inference_dtype == QuantType.CONV_I16_I8:
986 assert (
987 num_placeholders == 1
988 ), "Unsupported number of placeholders for Convolution: {}".format(
989 num_placeholders
990 )
991 if num_placeholders == 2:
992 qzero = [0, 0]
993 numpy_dtype = [np.int16, np.int8]
994 else:
995 qzero = [0, 0, 0]
996 numpy_dtype = [
997 np.int16,
998 np.int8,
999 np.int64,
1000 ] # np.int64 to represent 40 bits accumulator
1001 tflite_inference_dtype = tf.int16
1002 else:
1003 raise Exception(
1004 "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
1005 )
1006
1007 else:
1008 is_quantized = False
1009
1010 tf_model_filename = None
1011 tf_result_npy_filename = None
1012 tf_result_name = None
1013
1014 tflite_model_filename = None
1015 tflite_result_npy_filename = None
1016 tflite_result_name = None
1017
1018 placeholder_names = []
1019 placeholder_vals = []
1020 placeholder_signatures = ()
1021 placeholder_npy_filenames = []
1022 placeholder_shapes = []
1023
1024 for idx, (name, val) in enumerate(placeholders):
1025 placeholder_names.append(name)
1026 placeholder_signatures = placeholder_signatures + (
1027 tf.TensorSpec(shape=val.shape, dtype=val.dtype, name=name),
1028 )
1029 placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
1030 placeholder_shapes.append(val.shape)
1031
1032 # Get test builder class
1033 fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
1034 concrete_function = tf.function(input_signature=placeholder_signatures)(
1035 fcn_node.eval
1036 ).get_concrete_function()
1037
1038 if is_quantized:
1039
1040 assert dtype is tf.float32, "quantized test must come from float32 graph"
1041
1042            # 1. Quantize the float placeholder npy values to feed the graph
1043 for idx, (name, val) in enumerate(placeholders):
1044
1045 # we use np.amin()/np.amax() to determine dynamic range
1046 # for quantized test
1047 zeropoint = 0
1048 scale = 1.0
1049 if numpy_dtype[idx] != np.int64:
1050 qmin = np.iinfo(numpy_dtype[idx]).min
1051 qmax = np.iinfo(numpy_dtype[idx]).max
1052 num_bits = np.iinfo(numpy_dtype[idx]).bits
1053 # 40 bit is represented as np.int64
1054 else:
1055 num_bits = 40
1056 qmin = -(1 << num_bits)
1057 qmax = (1 << num_bits) - 1
1058
1059 min_val = np.amin(val)
1060 max_val = np.amax(val)
1061
1062 # for single value tensor, we set scale equal to the abs(value),
1063 # and fix zeropoint to 128
1064 # if val > 0, it'll be represented as 129,
1065 # where val = (129 - 128) * val
1066 # if val < 0, it'll be represented as 127,
1067 # where val = (127 - 128) * (-val)
1068                # if val == 0, it'll be represented as 128, with range [-128.0, 128.0]
1069 # and let quantized 1 represent the value
1070 # also adjust effective min/max consequently
1071 if max_val == min_val:
1072 if max_val != 0:
1073 scale = abs(max_val)
1074 else:
1075 scale = 1.0
1076 min_val = float(qmin - qzero[idx]) * scale
1077 max_val = float(qmax - qzero[idx]) * scale
1078 else:
1079 scale = (max_val - min_val) / float(qmax - qmin)
1080 zeropoint = int(round((-min_val) / scale)) + qmin
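                # Worked example with illustrative values: an int8 placeholder
                # with min_val=-1.0 and max_val=3.0 gives
                #   scale = (3.0 - (-1.0)) / (127 - (-128)) ~= 0.0157
                #   zeropoint = round(1.0 / 0.0157) + (-128) = -64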
1081
1082                # run through tf.fakequant first to ensure the quantization error is aligned
1083 fakequant_val = tf.quantization.fake_quant_with_min_max_args(
1084 val,
1085 min=min_val,
1086 max=max_val,
1087 num_bits=num_bits,
1088 name="gen_quant_npy",
1089 )
1090
1091 quant_val = np.round(fakequant_val / scale).astype(np.int32) + zeropoint
1092
1093                # In a few unit tests (after the May 2020 TF hash), this quantized
1094                # value can for some reason exceed the valid [qmin, qmax] range
1095 saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])
1096
1097                # save all quantized tensors as np.int32
1098                # since the TOSA numpy C++ API only supports int32
1099 np.save(
1100 os.path.join(test_dir, placeholder_npy_filenames[idx]),
1101 saved_val.astype(np.int32),
1102 False,
1103 )
1104
1105 placeholder_vals.append(tf.convert_to_tensor(saved_val))
1106
1107 # 2. Convert the model to quantized TFLite flatbuffer
1108 module = tf.Module()
1109 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1110 [concrete_function], module
1111 )
1112 converter.optimizations = [tf.lite.Optimize.DEFAULT]
1113 converter.experimental_new_converter = True
1114
1115 # use MLIR-based post-quantizer
1116 converter.experimental_new_quantizer = True
1117
1118 flag = (
1119 tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 # noqa: E501
1120 )
1121 if tflite_inference_dtype == tf.int16:
1122 converter.target_spec.supported_ops = [flag]
1123
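            # The generator below provides the representative dataset the
            # TFLite converter uses to calibrate activation ranges during
            # post-training quantization (args.num_samples random inputs).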
1124 def input_stats():
1125 for i in range(0, args.num_samples):
1126 a = [
1127 TGen.getRand(shape, tf.float32, rng)
1128 for shape in placeholder_shapes
1129 ]
1130 yield a
1131
1132 converter.representative_dataset = input_stats
1133 converter.inference_input_type = tflite_inference_dtype
1134 converter.inference_output_type = tflite_inference_dtype
1135
1136 tflite_model = converter.convert()
1137
1138 tflite_model_filename = "model.tflite"
1139
1140 # Write out converted model to disk
1141 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1142 f.write(tflite_model)
1143
1144 else: # is_quantized is False
1145
1146            # 1. Save out the numpy arrays directly
1147 for idx, (name, val) in enumerate(placeholders):
1148 placeholder_vals.append(tf.convert_to_tensor(val))
1149 np.save(
1150 os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
1151 )
1152
1153            # 2.a Save out .pb if the framework list includes tensorflow
1154 if "tf" not in excluded_framework_list:
1155 # Write out graph as protobuf to disk
1156 tf_model_filename = "model.pb"
1157 tf.io.write_graph(
1158 concrete_function.graph, test_dir, tf_model_filename, True
1159 )
1160
1161            # 2.b Save out .tflite if the framework list includes tflite
1162 if "tflite" not in excluded_framework_list:
1163 # Convert the model to TFLite flatbuffer
1164 module = tf.Module()
1165 converter = tf.lite.TFLiteConverter.from_concrete_functions(
1166 [concrete_function], module
1167 )
1168
1169 converter.experimental_new_converter = True
1170
1171                # Even if it's a non-quantized int32 test, this needs to be set to tf.float32
1172 converter.inference_input_type = tf.float32
1173 converter.inference_output_type = tf.float32
1174 tflite_model = converter.convert()
1175
1176 # Write out converted model to disk
1177 tflite_model_filename = "model.tflite"
1178 with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
1179 f.write(tflite_model)
1180
1181 # Get TF reference result if .pb is specified
1182 if tf_model_filename:
1183 tf_result_npy_filename = "tf_result.npy"
1184 tf_result = concrete_function(*placeholder_vals)
1185 np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)
1186
1187 tf_result_name = result_name
1188
1189 # Get TFLite inference result if .tflite is specified
1190 if tflite_model_filename:
1191 tflite_result_npy_filename = "tflite_result.npy"
1192
1193            ops_with_optimized_only_kernel = ["elu", "ceil", "gather", "rfft2d"]
1194
1195 if args.tflite_kernel_mode == "optimized" or (
1196 op_name in ops_with_optimized_only_kernel
1197 ):
1198 interpreter = tf.lite.Interpreter(
1199 model_path=os.path.join(test_dir, tflite_model_filename)
1200 )
1201 elif args.tflite_kernel_mode == "reference":
1202 interpreter = tf.lite.Interpreter(
1203 model_path=os.path.join(test_dir, tflite_model_filename),
1204 experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
1205 )
1206 else:
1207 assert 0, "unknown tflite interpreter mode {}".format(
1208 args.tflite_kernel_mode
1209 )
1210 interpreter.allocate_tensors()
1211
1212 input_details = interpreter.get_input_details()
1213 output_details = interpreter.get_output_details()
1214
1215 assert len(input_details) == len(
1216 placeholder_vals
1217 ), "number of placeholder mismatch"
1218
1219 for idx, val in enumerate(placeholder_vals):
1220 interpreter.set_tensor(input_details[idx]["index"], val.numpy())
1221
1222 interpreter.invoke()
1223 tflite_result = interpreter.get_tensor(output_details[0]["index"])
1224
1225 np.save(
1226 os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
1227 )
1228
1229            # The result tensor name may change after converting to a TFLite flatbuffer,
1230            # so overwrite the information with what the TFLite model reports directly.
1231            # Assume a single result tensor for now
1232 tflite_result_name = output_details[0]["name"]
1233
1234 # Write out test descriptor
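        # The descriptor records the model files, input/result .npy files,
        # tensor names/shapes and framework exclusions so that a downstream
        # test runner can reproduce and verify this test case.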
1235 write_test_json(
1236 filename=os.path.join(test_dir, "test.json"),
1237 tf_model_filename=tf_model_filename,
1238 tf_result_npy_filename=tf_result_npy_filename,
1239 tf_result_name=tf_result_name,
1240 tflite_model_filename=tflite_model_filename,
1241 tflite_result_npy_filename=tflite_result_npy_filename,
1242 tflite_result_name=tflite_result_name,
1243 ifm_name=placeholder_names,
1244 ifm_file=placeholder_npy_filenames,
1245 ifm_shape=placeholder_shapes,
1246 framework_exclusions=excluded_framework_list,
1247 quantized=is_quantized,
1248 )
1249 except Exception as e:
1250 msg = "Error running task: {}".format(e)
1251 print(msg)
1252 print(
1253 "".join(
1254 traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
1255 )
1256 )
1257 return False
1258 return True
1259
1260
1261def build_const_net(
1262 args,
1263 curr_shape,
1264 op_name,
1265 dtype,
1266 excluded_framework_list,
1267 quantized_inference_dtype,
1268 result_name,
1269 seed,
1270 rng,
1271 filter,
1272 unit_test_args,
1273):
1274
1275 if quantized_inference_dtype:
1276 quant_dtype = get_tf_dtype(quantized_inference_dtype)
1277 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
1278 else:
1279 test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
1280 test_dir = os.path.join(args.output_dir, test_dir)
1281
1282 # If the operator has an additional function to generate arguments, call it
1283 # here and iterate through the argument list that it generates
1284 op = TF_OP_LIST[op_name]
1285 op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]
1286
1287    try:
1288 rank_lo, rank_hi = op["rank"]
1289 except KeyError:
1290        # Set the testing rank range to (1, 4) by default.
1291 rank_lo = 1
1292 rank_hi = 4
1293
1294 if len(curr_shape) not in range(rank_lo, rank_hi + 1):
1295 return
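    # e.g. the conv3d templates declare "rank": (1, 5), so 5-D shapes such as
    # (2, 2, 7, 7, 2) are kept for them but skipped for operators using the
    # default (1, 4) rank range.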
1296
1297    addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
1298 for desc, addl_args in addl_args_tuple:
1299        # Only filter on the full test_name, not the output directory
1300 _, test_name = os.path.split(test_dir + desc)
1301 if not filter or filter.search(test_name):
1302            unit_test_args.append(
1303 [
1304 op_name,
1305 args,
1306 test_dir + desc,
1307 curr_shape,
1308 addl_args,
1309 dtype,
1310 excluded_framework_list,
1311 quantized_inference_dtype,
1312 result_name,
1313 seed,
1314 ]
1315 )
1316
1317
1318# Python's built-in hash is not reproducible across runs, so create our own hash
1319def op_name_hash(op_name):
1320 result = 0xDEADBEEF
1321 for ch in op_name:
1322 if result & 1:
1323 result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
1324 else:
1325 result = (ord(ch) << 24) ^ (result >> 1)
1326
1327 return result
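# e.g. op_name_hash("add") returns the same value on every run, unlike the
# built-in hash(), whose result for strings is randomized per process.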
1328
1329
1330def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):
1331
1332 if not args.quiet:
1333 print(
1334 "Generating tests for {} ".format(
1335 op_name
1336 )
1337 )
1338
1339 op = TF_OP_LIST[op_name]
1340
1341 # Seed the RNG so that we get the same random tests for each test each time
1342 # If the number of tests for a given generation function changes, the tests
1343 # for that operator may also change accordingly, but this will at least keep
1344 # down churn across operators.
1345
1346 bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
1347 np.int32
1348 ).max
1349 rng = np.random.default_rng(bounded_hash_val)
1350
1351    # this is a dictionary with 'tf' and 'tflite' as keys
1352    # and the values being the data types we want to test under each framework
1353
1354 if isinstance(op["types"], dict):
1355 try:
1356 tf_dtypes = op["types"]["tf"]
1357 except KeyError:
1358 tf_dtypes = []
1359 try:
1360 tflite_dtypes = op["types"]["tflite"]
1361 except KeyError:
1362 tflite_dtypes = []
1363 elif isinstance(op["types"], list):
1364 tf_dtypes = op["types"]
1365 tflite_dtypes = op["types"]
1366
1367 tf_nonquantized_dtypes = tf_dtypes # tf doesn't support quantized data types
1368 tflite_quantized_dtypes = []
1369 tflite_nonquantized_dtypes = []
1370 for dtype in tflite_dtypes:
1371 if isinstance(dtype, QuantType):
1372 tflite_quantized_dtypes.append(dtype)
1373 else:
1374 tflite_nonquantized_dtypes.append(dtype)
1375
1376 nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
1377 set(tflite_nonquantized_dtypes)
1378 )
1379 nonquantized_dtypes = list(nonquantized_dtypes_set)
1380 quantized_dtypes = tflite_quantized_dtypes
1381
1382 # populate non quantized unit test arguments
1383 for dtype in nonquantized_dtypes:
1384
1385 excluded_framework_set = set(ALL_FRAMEWORKS)
1386 if dtype in tf_nonquantized_dtypes:
1387 excluded_framework_set.remove("tf")
1388 if dtype in tflite_nonquantized_dtypes:
1389 excluded_framework_set.remove("tflite")
1390 excluded_framework_list = list(excluded_framework_set)
1391
1392 for curr_shape in shape_list:
1393 build_const_net(
1394 args,
1395 curr_shape,
1396 op_name,
1397 dtype,
1398 excluded_framework_list,
1399 None,
1400 result_name,
1401 bounded_hash_val,
1402 rng,
1403 filter,
1404 unit_test_args,
1405 )
1406
1407 # populate quantized unit test arguments
1408 # must exclude 'tf' and source dtype being tf.float32
1409 for dtype in quantized_dtypes:
1410 for curr_shape in shape_list:
1411 build_const_net(
1412 args,
1413 curr_shape,
1414 op_name,
1415 tf.float32,
1416 ["tf"],
1417 dtype,
1418 result_name,
1419 bounded_hash_val,
1420 rng,
1421 filter,
1422 unit_test_args,
1423 )
1424
1425 return unit_test_args
1426
1427
1428def createDynamicOpLists():
1429 """The templated operators are conv2d-style operators with a number of kernel
1430 sizes. Since the operator is unchanged, we generate the range of kernel
1431 sizes here in this loop and remove the original templates from the list.
1432
1433 This could be expanded to non-conv2d-style operators in the future."""
1434
1435 # Dynamically create op lists for convolutions with a list of kernel sizes
1436 KERNELS = [
1437 [1, 1],
1438 [3, 3],
1439 [5, 5],
1440 ]
1441
1442    # dim = [D, H, W]
1443 KERNELS_3D = [
1444 [1, 1, 1],
1445 [2, 3, 3],
1446 [3, 5, 5],
1447 ]
1448
1449    TEMPLATE_LIST = [
1450 "conv2d",
1451 "conv2d_bias",
1452 "conv2d_relu",
1453 "conv2d_relu6",
1454 "conv2d_relu_n1_to_1",
1455 "conv2d_tanh",
1456 "depthwise_conv2d",
1457 "depthwise_conv2d_bias",
1458 "transpose_conv2d",
1459 ]
1460
1461    TEMPLATE_LIST_CONV3D = [
1462 "conv3d",
1463 "conv3d_bias",
1464 ]
1465
1466    for t in TEMPLATE_LIST:
1467 for k in KERNELS:
1468 testName = "{}_{}x{}".format(t, k[0], k[1])
1469 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1470 TF_OP_LIST[testName]["filter"] = k
1471 TF_OP_LIST[testName]["template"] = False
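    # e.g. "conv2d_TEMPLATE" expands into "conv2d_1x1", "conv2d_3x3" and
    # "conv2d_5x5" entries, each carrying its kernel size in "filter".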
1472
1473    # The existing 2D operators do not support kernels with more than two dimensions.
1474 for t in TEMPLATE_LIST_CONV3D:
1475 for k in KERNELS_3D:
1476 testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
1477 TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
1478 TF_OP_LIST[testName]["filter"] = k
1479 TF_OP_LIST[testName]["template"] = False
1480
1481    # Delete any templates after having created any dynamic ops
1482 # This is a two-pass operation because it's bad practice to delete
1483 # keys from dictionaries while iterating
1484 keyList = []
1485 for k in TF_OP_LIST:
1486 try:
1487 if TF_OP_LIST[k]["template"]:
1488 keyList.append(k)
1489 continue
1490 except KeyError:
1491 pass
1492
1493 for k in keyList:
1494 del TF_OP_LIST[k]
1495
1496
1497def main():
1498 parser = argparse.ArgumentParser()
1499 parser.add_argument(
1500 "--seed", dest="random_seed", default=42, type=int, help="Random seed"
1501 )
1502 parser.add_argument(
1503 "--random-shapes",
1504 dest="random_shapes",
1505 default=0,
1506 type=int,
1507 help=(
1508 "Use N random shapes of each rank for generating tests,"
1509 "seeded with random seed"
1510 ),
1511 )
1512 parser.add_argument(
1513 "-o",
1514 "--output-dir",
1515 dest="output_dir",
1516 default=".",
1517 type=str,
1518 help="Test output directory path prefix",
1519 )
1520 parser.add_argument(
1521 "-q",
1522 "--quiet",
1523 dest="quiet",
1524 default=False,
1525 action="store_true",
1526 help="Do not print test names",
1527 )
1528 parser.add_argument(
1529 "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
1530 )
1531 parser.add_argument(
1532 "-m",
1533 "--tflite-kernel-mode",
1534 dest="tflite_kernel_mode",
1535 type=str,
1536 choices=["reference", "optimized"],
1537 default="reference",
1538 help="TFLite interpreter kernel mode",
1539 )
1540 parser.add_argument(
1541 "--num-samples",
1542 dest="num_samples",
1543 default=200,
1544 type=int,
1545 help="Number of input samples for post-training quantization",
1546 )
1547 parser.add_argument(
1548 "--filter",
1549 dest="filter",
1550 default="",
1551 type=str,
1552 help="Filter test names by this expression",
1553 )
1554 args = parser.parse_args()
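    # Illustrative invocation (paths and filter are placeholders only):
    #   ./<this_script>.py -o ./frameworks_tests --filter conv2d_3x3 -j 8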
1555
1556 # Turn the filter into a re object if present
1557 filter = None
1558 if args.filter != "":
1559 filter = re.compile(args.filter)
1560
1561 # Autodetect CPU count
1562 if args.jobs <= 0:
1563 args.jobs = os.cpu_count()
1564
1565 # Disable TF info messages
1566 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
1567
1568 try:
1569 os.makedirs(args.output_dir)
1570 except FileExistsError:
1571 pass
1572
1573 if args.random_shapes:
1574 gen_rand_shapes(args)
1575
1576 # Build dynamic ops
1577 createDynamicOpLists()
1578
1579 # Generate the test list and arguments to run_unit_test()
1580 unit_test_args = []
1581
1582 for op in TF_OP_LIST:
1583 generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)
1584
1585 errors = 0
1586 for t in unit_test_args:
1587 if not run_unit_test(*t):
1588 errors = errors + 1
1589
1590 if not args.quiet:
1591 print("\nAll tasks done - with {} errors".format(errors))
1592
1593 return 1 if errors else 0
1594
1595
1596if __name__ == "__main__":
1597 exit(main())