#!/usr/bin/env python3
# Copyright (c) 2020-2023, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import traceback

import numpy as np

# Level | Level for Humans | Level Description
# ------|------------------|------------------------------------
#  0    | DEBUG            | [Default] Print all messages
#  1    | INFO             | Filter out INFO messages
#  2    | WARNING          | Filter out INFO & WARNING messages
#  3    | ERROR            | Filter out all messages
# Filter out TensorFlow debug messages except errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
import tensorflow as tf  # noqa: E402
from frameworks.write_test_json import write_test_json  # noqa: E402
from frameworks.arg_gen import ArgGen  # noqa: E402
from frameworks.tensor_gen import TGen  # noqa: E402
from frameworks.test_builder import TBuilder  # noqa: E402
from frameworks.test_gen_utils import (  # noqa: E402
    QuantType,
    get_tf_dtype,
    get_shape_str,
)  # noqa: E402
from tensorflow.lite.python.interpreter import OpResolverType  # noqa: E402

# All of the supported frameworks
ALL_FRAMEWORKS = ["tf", "tflite"]

# Lists of different data types
TYPE_F = [tf.float32]
TYPE_I = [tf.int32]
TYPE_FI = [tf.float32, tf.int32]
TYPE_B = [tf.bool]
TYPE_FIB = [tf.float32, tf.int32, tf.bool]
TYPE_H = [tf.float16]
TYPE_FH = [tf.float32, tf.float16]
TYPE_FHI = [tf.float32, tf.float16, tf.int32]
TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]

# The list of operator tests
# Each dictionary entry for an op is a dictionary with the following required members:
#   'operands': tuple (number_of_placeholder_tensors, number_of_constant_tensors)
#   'build_fcn': tuple (Test builder function, Tensor generator function,
#                       Argument generator function)
#   'types': list of TensorFlow types that should be tested for this op
#               OR
#            a dictionary of {'framework_name': [type_list]} for cases where only
#            a subset of the types should be tested in each framework.  This can also
#            be used to restrict an operator to a particular framework.
#
# And optional members:
#   'template': boolean (indicates that this is a templated op which gets further
#               processing in createDynamicOpLists)
#   'bias': boolean indicating that there is a bias component to be generated
#   'qtypes': list of QuantType quantized types to generate for this op
#   'rank': tuple (lowest rank, highest rank). Dimension range of the input tensor.
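#
# For illustration only (not a real entry), a hypothetical "example_op" that
# follows the schema above might look like:
#   "example_op": {
#       "operands": (1, 0),
#       "build_fcn": (TBuilder.ExampleOp, TGen.tgBasic, ArgGen.agNone),
#       "types": {"tf": TYPE_F, "tflite": TYPE_F},
#       "rank": (1, 4),
#   },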

TF_OP_LIST = {
    "add": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "sub": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # QuantType.ALL_I16 fails in TFLite conversion
        },
    },
    "mul": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "exp": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "rcp": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "relu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "relu1": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": [],
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "relu0To1": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu0To1, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": [],
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "relu6": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "leaky_relu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "prelu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Prelu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "gelu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            # Need compiler support for tf.Erf.
            # "tf": TYPE_F,
            "tflite": list(
                # Only float32, int8 and uint8 supported currently
                TYPE_F
                + [QuantType.ALL_U8, QuantType.ALL_I8]
            ),
        },
    },
    "concat": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
        "types": TYPE_FI,
    },
    "bitwise_and": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_or": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_not": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_xor": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "logical_and": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "logical_or": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "logical_not": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "reduce_any": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_B,
    },
    "reduce_all": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_B,
    },
    "reduce_min": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "reduce_max": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "reduce_sum": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_F,
            # v2 converter doesn't recognize quantized reduce_sum
            # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
            "tflite": TYPE_F,
        },
    },
    "reduce_mean": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "reduce_product": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_F,
    },
    "min": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "max": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "pow": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
        # Technically, integer is supported, but only for positive exponents.
        # Needs a random argument generator.
        "types": TYPE_F,
    },
    "abs": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "ceil": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "floor": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "log": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "negate": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "rsqrt": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rsqrt, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "sign": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sign, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
        },
    },
    "sigmoid": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "tanh": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "sin": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sin, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "cos": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Cos, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "atan2": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Atan2, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tflite": TYPE_F,
        },
    },
    "square": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "squared_difference": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_F,
    },
    "equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "greater_equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "greater": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "less": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "less_equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "conv2d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu6_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu_n1_to_1_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    # This test is converted as:
    # tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
    # TODO: any way to generate tfl.conv2d(){fused_activation_function="TANH"}?
447 "conv2d_tanh_TEMPLATE": {
448 "operands": (1, 2),
449 "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
450 "types": {
451 "tf": [tf.float32],
452 "tflite": [
453 tf.float32,
454 QuantType.CONV_U8_U8,
455 QuantType.CONV_I8_I8,
456 QuantType.CONV_I16_I8,
457 ],
458 },
459 "template": True,
460 },
461 "conv2d_bias_TEMPLATE": {
462 "operands": (1, 2),
463 "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
464 "types": {
465 "tf": [tf.float32],
466 "tflite": [
467 tf.float32,
468 QuantType.CONV_U8_U8,
469 QuantType.CONV_I8_I8,
470 QuantType.CONV_I16_I8,
471 ],
472 },
473 "bias": True,
474 "template": True,
475 },
TatWai Chongfd629052022-07-25 04:01:58 +0000476 "conv3d_TEMPLATE": {
477 "operands": (1, 1),
478 "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
479 "types": {
480 "tf": [tf.float32],
481 "tflite": [
482 tf.float32,
483 QuantType.CONV_U8_U8,
484 QuantType.CONV_I8_I8,
485 # Quantization to 16x8-bit not yet supported by tflite.
486 ],
487 },
488 "template": True,
489 "rank": (1, 5),
490 },
491 "conv3d_bias_TEMPLATE": {
492 "operands": (1, 2),
493 "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
494 "types": {
495 "tf": [tf.float32],
496 "tflite": [
497 tf.float32,
498 QuantType.CONV_U8_U8,
499 QuantType.CONV_I8_I8,
500 # Quantization to 16x8-bit not yet supported by tflite.
501 ],
502 },
503 "bias": True,
504 "template": True,
505 "rank": (1, 5),
506 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000507 "depthwise_conv2d_TEMPLATE": {
508 "operands": (1, 1),
509 "build_fcn": (
510 TBuilder.DepthwiseConv2d,
511 TGen.tgDepthwiseConv2d,
512 ArgGen.agDepthwiseConv2d,
513 ),
514 "types": {
515 "tf": [tf.float32],
516 "tflite": [
517 tf.float32,
518 QuantType.CONV_U8_U8,
519 QuantType.CONV_I8_I8,
520 QuantType.CONV_I16_I8,
521 ],
522 },
523 "template": True,
524 },
525 "depthwise_conv2d_bias_TEMPLATE": {
526 "operands": (1, 2),
527 "build_fcn": (
528 TBuilder.DepthwiseConv2dWithBias,
529 TGen.tgDepthwiseConv2d,
530 ArgGen.agDepthwiseConv2d,
531 ),
532 "types": {
533 "tf": [tf.float32],
534 "tflite": [
535 tf.float32,
536 QuantType.CONV_U8_U8,
537 QuantType.CONV_I8_I8,
538 QuantType.CONV_I16_I8,
539 ],
540 },
541 "bias": True,
542 "template": True,
543 },
544 "transpose_conv2d_TEMPLATE": {
545 "operands": (1, 1),
546 "build_fcn": (
547 TBuilder.TransposeConv2d,
548 TGen.tgTransposeConv2d,
549 ArgGen.agTransposeConv2d,
550 ),
551 "types": {
552 "tf": [tf.float32],
553 "tflite": [
554 tf.float32,
555 QuantType.CONV_U8_U8,
556 QuantType.CONV_I8_I8,
557 QuantType.CONV_I16_I8,
558 ],
559 },
560 "template": True,
561 },
562 "argmax": {
563 "operands": (1, 0),
564 "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
565 "types": {"tf": TYPE_F},
566 },
567 "avg_pool2d": {
568 "operands": (1, 0),
569 "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
570 "types": {
571 "tf": TYPE_F,
572 "tflite": list(
573 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
574 ),
575 },
576 },
577 "max_pool2d": {
578 "operands": (1, 0),
579 "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
580 "types": {
581 "tf": TYPE_F,
582 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
583 # ALL_I16 not supported yet
584 # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
585 # QI16 is missing from MaxPoolOperandAndResultConstraints
586 # If adding QI16 back this test can run through.
587 },
588 },
589 "reshape": {
590 "operands": (1, 0),
591 "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
592 "types": TYPE_FI,
593 },
594 "transpose": {
595 "operands": (1, 0),
596 "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
597 "types": TYPE_FI,
598 },
599 "slice": {
600 "operands": (1, 0),
601 "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
602 "types": TYPE_FI,
603 },
604 "strided_slice": {
605 "operands": (1, 0),
606 "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
607 "types": TYPE_FI,
608 },
609 "select": {
610 "operands": (3, 0),
611 "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
612 "types": TYPE_FI,
613 },
614 "addn": {
615 "operands": (4, 0),
616 "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
617 "types": TYPE_FI,
618 },
619 "concatv2": {
620 "operands": (4, 0),
621 "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
622 "types": TYPE_FI,
623 },
624 "stack": {
625 "operands": (4, 0),
626 "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
627 "types": TYPE_FI,
628 },
629 "unstack": {
630 "operands": (1, 0),
631 "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
632 "types": TYPE_F,
633 },
TatWai Chongf7008da2022-09-09 09:35:40 +0000634 "mirrorpad": {
635 "operands": (1, 0),
636 "build_fcn": (TBuilder.MirrorPad, TGen.tgBasic, ArgGen.agMirrorPad),
637 "types": TYPE_FI,
638 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000639 "pad": {
640 "operands": (1, 0),
641 "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
TatWai Chong2226f902023-02-22 18:38:01 -0800642 "types": {
643 "tf": TYPE_F,
644 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
645 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000646 },
647 "expand_dims": {
648 "operands": (1, 0),
649 "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
650 "types": TYPE_FI,
651 },
652 "shape": {
653 "operands": (1, 0),
654 "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
655 "types": TYPE_FI,
656 },
657 "rank": {
658 "operands": (1, 0),
659 "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
660 "types": TYPE_FI,
661 },
662 "fill": {
663 "operands": (1, 0),
664 "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
665 "types": TYPE_FI,
666 },
667 "elu": {
668 "operands": (1, 0),
669 "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
670 "types": TYPE_F,
671 },
672 "softmax": {
673 "operands": (1, 0),
674 "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
675 "types": {
676 "tf": TYPE_F,
677 "tflite": list(
678 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
679 ),
680 },
681 },
682 "log_softmax": {
683 "operands": (1, 0),
684 "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
685 "types": TYPE_F,
686 },
687 "matmul": {
688 "operands": (2, 0),
689 "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
690 "types": {
691 "tf": TYPE_F,
692 "tflite": list(
693 TYPE_F
694 + [QuantType.ALL_U8, QuantType.ALL_I8]
                # 16-bit matmul fails to convert
            ),
        },
    },
    "add_scalar": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "add_1d": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "split": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
        "types": TYPE_FI,
    },
    "tile": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
        "types": TYPE_FI,
    },
    "reverse": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
        "types": {"tf": TYPE_FI},
    },
    "gather": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
        "types": TYPE_FI,
    },
    "gather_nd": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
        "types": TYPE_FI,
    },
    "scatter_nd": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
        "types": TYPE_FI,
    },
    "space_to_batch": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
        "types": TYPE_F,
    },
    "batch_to_space": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
        "types": TYPE_F,
    },
    "space_to_depth": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
        "types": TYPE_F,
    },
    "depth_to_space": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
        "types": TYPE_F,
    },
    "one_hot": {
        "operands": (3, 1),
        "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
        "types": TYPE_FI,
    },
    "fakequant": {
        "operands": (1, 0),
        "build_fcn": (
            TBuilder.Fakequant,
            TGen.tgBasic,
            ArgGen.agFakequant,
        ),
        "types": {"tf": TYPE_F},
    },
    "resize": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Resize, TGen.tgPooling, ArgGen.agResize),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "left_shift": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
        "types": {"tf": [tf.int32]},
    },
    "right_shift": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
        "types": {
            "tf": [
                tf.int32,
            ]
        },
    },
    "while": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.While, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tflite": list(TYPE_F),
        },
    },
    "lstm": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LSTM, TGen.tgRecurrent, ArgGen.agNone),
        "types": {
            "tflite": [
                tf.float32,
                # tf.int32
            ]
        },
    },
    "gru": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.GRU, TGen.tgRecurrent, ArgGen.agNone),
        "types": {
            "tflite": [
                tf.float32,
                # tf.int32
            ]
        },
    },
    "rnn": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.RNN, TGen.tgRecurrent, ArgGen.agNone),
        "types": {
            "tflite": [
                tf.float32,
            ]
        },
    },
    "rfft2d": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.RFFT2d, TGen.tgRFFT2d, ArgGen.agRFFT2d),
        "types": {
            "tflite": TYPE_F,
        },
    },
}

# Shapes to be tested; default can be overwritten
shape_list = [
    (1,),
    (64,),
    (14, 19),
    (13, 21, 3),
    (1, 8, 16),
    (1, 4, 4, 4),
    (1, 8, 4, 17),
    (1, 4, 8, 19),
    (1, 32, 32, 8),
    (1, 7, 7, 9),
    (3, 1, 1, 7),
    (2, 2, 7, 7, 2),
    (1, 4, 8, 21, 17),
    (3, 32, 16, 16, 5),
]


def gen_rand_shapes(args):
    """Overwrite the global shape list with a new list of random shapes"""
    global shape_list

    rng = np.random.default_rng(args.random_seed)

    # Don't let things get too big... cap the maximum volume, but let
    # an individual dimension be 1..47
    max_total_volume = 32 * 32 * 4

    shape_list = []
    # Only iterate over ranks 2, 3, 4, and 5
    for rank in range(2, 6):
        for n in range(args.random_shapes):
            new_shape = rng.integers(1, 48, size=rank)

            # Set the batch dimension on 4D or 5D objects to 1
            if rank == 4 or rank == 5:
                new_shape[0] = 1

            # Limit the total shape volume and throw out any
            # shapes that wouldn't leave at least size=2 in some non-batch dimension
            volume = 1
            skip_shape = False
            for i in range(rank):

                volume *= new_shape[i]

                # Reduce the shape, while it's larger than the maximum volume
                while volume > max_total_volume:
                    new_shape[i] = new_shape[i] // 2
                    volume = volume // 2

                    # Now an untenable dimension size? Skip this one.
                    if new_shape[i] < 1:
                        skip_shape = True

            if not skip_shape:
                shape_list.append(tuple(new_shape))


# Construct, run and save a whole TensorFlow tf.function to a protobuf file,
# or convert it to .tflite if it's a quantized unit test
def run_unit_test(
    op_name,
    args,
    test_dir,
    curr_shape,
    addl_args,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
):

    try:
        op = TF_OP_LIST[op_name]
        op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

        # Get and seed a random number generator for this test
        rng = np.random.default_rng(seed)

        # return placeholders=(str: name, np.array: value)
        #        consts=(str: name, np.array: value)
        placeholders, consts = tensor_gen_fcn(op, curr_shape, dtype, rng)

        # if the test doesn't have any placeholders/consts, terminate early
        if len(placeholders) == 0 and len(consts) == 0:
            return True

        if not args.quiet:
            print(" {} ".format(test_dir))

        try:
            os.mkdir(test_dir)
        except FileExistsError:
            pass

        const_nodes = [value for name, value in consts]

        num_placeholders = len(placeholders)
        # if the test is quantized, create tensor quantization metadata for
        # each input tensor, based on the requested quantized type
        if quantized_inference_dtype:
            is_quantized = True
            # TODO: support INT8 IFM x INT4 weight later
            if quantized_inference_dtype == QuantType.ALL_U8:
                qzero = [128] * num_placeholders
                numpy_dtype = [np.uint8] * num_placeholders
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.ALL_I8:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int8] * num_placeholders
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.ALL_I16:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int16] * num_placeholders
                tflite_inference_dtype = tf.int16
            elif quantized_inference_dtype == QuantType.CONV_U8_U8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [128] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.uint8, np.uint8]
                else:
                    numpy_dtype = [np.uint8, np.uint8, np.int32]
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.CONV_I8_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [0] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.int8, np.int8]
                else:
                    numpy_dtype = [np.int8, np.int8, np.int32]
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.CONV_I16_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                if num_placeholders == 2:
                    qzero = [0, 0]
                    numpy_dtype = [np.int16, np.int8]
                else:
                    qzero = [0, 0, 0]
                    numpy_dtype = [
                        np.int16,
                        np.int8,
                        np.int64,
                    ]  # np.int64 to represent the 40-bit accumulator
                tflite_inference_dtype = tf.int16
            else:
                raise Exception(
                    "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
                )

        else:
            is_quantized = False

        tf_model_filename = None
        tf_result_npy_filename = None
        tf_result_name = None

        tflite_model_filename = None
        tflite_result_npy_filename = None
        tflite_result_name = None

        placeholder_names = []
        placeholder_vals = []
        placeholder_signatures = ()
        placeholder_npy_filenames = []
        placeholder_shapes = []

        for idx, (name, val) in enumerate(placeholders):
            placeholder_names.append(name)
            placeholder_signatures = placeholder_signatures + (
                tf.TensorSpec(shape=val.shape, dtype=val.dtype, name=name),
            )
            placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
            placeholder_shapes.append(val.shape)

        # Get test builder class
        fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
        concrete_function = tf.function(input_signature=placeholder_signatures)(
            fcn_node.eval
        ).get_concrete_function()

        if is_quantized:

            assert dtype is tf.float32, "quantized test must come from float32 graph"

            # 1. Quantize the float placeholder npy data to feed the graph
            for idx, (name, val) in enumerate(placeholders):

                # we use np.amin()/np.amax() to determine the dynamic range
                # for the quantized test
                zeropoint = 0
                scale = 1.0
                if numpy_dtype[idx] != np.int64:
                    qmin = np.iinfo(numpy_dtype[idx]).min
                    qmax = np.iinfo(numpy_dtype[idx]).max
                    num_bits = np.iinfo(numpy_dtype[idx]).bits
                # 40 bit is represented as np.int64
                else:
                    num_bits = 40
                    qmin = -(1 << num_bits)
                    qmax = (1 << num_bits) - 1

                min_val = np.amin(val)
                max_val = np.amax(val)

                # for a single-value tensor, we set scale equal to abs(value),
                # and fix zeropoint to 128
                # if val > 0, it'll be represented as 129,
                #     where val = (129 - 128) * val
                # if val < 0, it'll be represented as 127,
                #     where val = (127 - 128) * (-val)
                # if val == 0, it'll be represented as 128, with range [-128.0, 128.0]
                # and let quantized 1 represent the value
                # also adjust the effective min/max accordingly
                if max_val == min_val:
                    if max_val != 0:
                        scale = abs(max_val)
                    else:
                        scale = 1.0
                    min_val = float(qmin - qzero[idx]) * scale
                    max_val = float(qmax - qzero[idx]) * scale
                else:
                    scale = (max_val - min_val) / float(qmax - qmin)
                    zeropoint = int(round((-min_val) / scale)) + qmin
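
                # Illustrative (assumed) example for the int8 case: with
                # min_val=-1.0, max_val=3.0, qmin=-128 and qmax=127, this gives
                # scale = 4.0 / 255 ~= 0.0157 and
                # zeropoint = round(1.0 / 0.0157) + (-128) = -64.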

                # run through tf.fakequant first to ensure the quantization error is aligned
                fakequant_val = tf.quantization.fake_quant_with_min_max_args(
                    val,
                    min=min_val,
                    max=max_val,
                    num_bits=num_bits,
                    name="gen_quant_npy",
                )

                quant_val = np.round(fakequant_val / scale).astype(np.int32) + zeropoint

                # in a few unit tests after the TF hash of May 2020, this
                # quantized value can exceed the [0, 255] range for some reason
                saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])

                # save all quantized tensors as np.int32
                # since the TOSA numpy C++ API only supports int32
                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]),
                    saved_val.astype(np.int32),
                    False,
                )

                placeholder_vals.append(tf.convert_to_tensor(saved_val))

            # 2. Convert the model to quantized TFLite flatbuffer
            module = tf.Module()
            converter = tf.lite.TFLiteConverter.from_concrete_functions(
                [concrete_function], module
            )
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.experimental_new_converter = True

            # use MLIR-based post-quantizer
            converter.experimental_new_quantizer = True

            flag = (
                tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8  # noqa: E501
            )
            if tflite_inference_dtype == tf.int16:
                converter.target_spec.supported_ops = [flag]

            def input_stats():
                for i in range(0, args.num_samples):
                    a = [
                        TGen.getRand(shape, tf.float32, rng)
                        for shape in placeholder_shapes
                    ]
                    yield a

            converter.representative_dataset = input_stats
            converter.inference_input_type = tflite_inference_dtype
            converter.inference_output_type = tflite_inference_dtype

            tflite_model = converter.convert()

            tflite_model_filename = "model.tflite"

            # Write out converted model to disk
            with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                f.write(tflite_model)

        else:  # is_quantized is False

            # 1. Save out the numpy arrays directly
            for idx, (name, val) in enumerate(placeholders):
                placeholder_vals.append(tf.convert_to_tensor(val))
                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
                )

            # 2.a Save out .pb if the framework list includes tensorflow
            if "tf" not in excluded_framework_list:
                # Write out graph as protobuf to disk
                tf_model_filename = "model.pb"
                tf.io.write_graph(
                    concrete_function.graph, test_dir, tf_model_filename, True
                )

            # 2.b Save out .tflite if the framework list includes tflite
            if "tflite" not in excluded_framework_list:
                # Convert the model to TFLite flatbuffer
                module = tf.Module()
                converter = tf.lite.TFLiteConverter.from_concrete_functions(
                    [concrete_function], module
                )

                converter.experimental_new_converter = True

                # Even if it's a non-quantized int32 test, this needs to be set to tf.float32
                converter.inference_input_type = tf.float32
                converter.inference_output_type = tf.float32
                tflite_model = converter.convert()

                # Write out converted model to disk
                tflite_model_filename = "model.tflite"
                with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                    f.write(tflite_model)

        # Get TF reference result if .pb is specified
        if tf_model_filename:
            tf_result_npy_filename = "tf_result.npy"
            tf_result = concrete_function(*placeholder_vals)
            np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)

            tf_result_name = result_name

        # Get TFLite inference result if .tflite is specified
        if tflite_model_filename:
            tflite_result_npy_filename = "tflite_result.npy"

            ops_with_optimized_only_kernel = ["elu", "ceil", "gather", "rfft2d"]

            if args.tflite_kernel_mode == "optimized" or (
                op_name in ops_with_optimized_only_kernel
            ):
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename)
                )
            elif args.tflite_kernel_mode == "reference":
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename),
                    experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
                )
            else:
                assert 0, "unknown tflite interpreter mode {}".format(
                    args.tflite_kernel_mode
                )
            interpreter.allocate_tensors()

            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()

            assert len(input_details) == len(
                placeholder_vals
            ), "number of placeholders mismatch"

            for idx, val in enumerate(placeholder_vals):
                interpreter.set_tensor(input_details[idx]["index"], val.numpy())

            interpreter.invoke()
            tflite_result = interpreter.get_tensor(output_details[0]["index"])

            np.save(
                os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
            )

            # Result tensor name would change after converting to TFLite flatbuffer
            # Overwrite the information from TFLite models directly.
            # Assume single result tensor now
            tflite_result_name = output_details[0]["name"]

        # Write out test descriptor
        write_test_json(
            filename=os.path.join(test_dir, "test.json"),
            tf_model_filename=tf_model_filename,
            tf_result_npy_filename=tf_result_npy_filename,
            tf_result_name=tf_result_name,
            tflite_model_filename=tflite_model_filename,
            tflite_result_npy_filename=tflite_result_npy_filename,
            tflite_result_name=tflite_result_name,
            ifm_name=placeholder_names,
            ifm_file=placeholder_npy_filenames,
            ifm_shape=placeholder_shapes,
            framework_exclusions=excluded_framework_list,
            quantized=is_quantized,
        )
    except Exception as e:
        msg = "Error running task: {}".format(e)
        print(msg)
        print(
            "".join(
                traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
            )
        )
        return False
    return True


def build_const_net(
    args,
    curr_shape,
    op_name,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
    rng,
    filter,
    unit_test_args,
):

    if quantized_inference_dtype:
        quant_dtype = get_tf_dtype(quantized_inference_dtype)
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
    else:
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
    test_dir = os.path.join(args.output_dir, test_dir)

    # If the operator has an additional function to generate arguments, call it
    # here and iterate through the argument list that it generates
    op = TF_OP_LIST[op_name]
    op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

    try:
        rank_lo, rank_hi = op["rank"]
    except KeyError:
        # Set the testing rank to (1, 4) by default.
        rank_lo = 1
        rank_hi = 4

    if len(curr_shape) not in range(rank_lo, rank_hi + 1):
        return

    addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
    for desc, addl_args in addl_args_tuple:
        # Only filter on the full test_name, not the output directory
        _, test_name = os.path.split(test_dir + desc)
        if not filter or filter.search(test_name):
            unit_test_args.append(
                [
                    op_name,
                    args,
                    test_dir + desc,
                    curr_shape,
                    addl_args,
                    dtype,
                    excluded_framework_list,
                    quantized_inference_dtype,
                    result_name,
                    seed,
                ]
            )


# Python's built-in hash is not reproducible across runs,
# so create a simple deterministic hash for our purposes
def op_name_hash(op_name):
    result = 0xDEADBEEF
    for ch in op_name:
        if result & 1:
            result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
        else:
            result = (ord(ch) << 24) ^ (result >> 1)

    return result
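
# Example (illustrative only): op_name_hash("add") yields the same value on
# every run, whereas Python's built-in hash() of a string can vary between
# interpreter invocations when string hash randomization is enabled.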


def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):

    if not args.quiet:
        print("Generating tests for {} ".format(op_name))

    op = TF_OP_LIST[op_name]

    # Seed the RNG so that we get the same random tests for each test each time
    # If the number of tests for a given generation function changes, the tests
    # for that operator may also change accordingly, but this will at least keep
    # down churn across operators.

    bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
        np.int32
    ).max
    rng = np.random.default_rng(bounded_hash_val)

    # this is a dictionary with 'tf' and 'tflite' as keys
    # and the values being the data types we want to test under these frameworks

    if isinstance(op["types"], dict):
        try:
            tf_dtypes = op["types"]["tf"]
        except KeyError:
            tf_dtypes = []
        try:
            tflite_dtypes = op["types"]["tflite"]
        except KeyError:
            tflite_dtypes = []
    elif isinstance(op["types"], list):
        tf_dtypes = op["types"]
        tflite_dtypes = op["types"]

    tf_nonquantized_dtypes = tf_dtypes  # tf doesn't support quantized data types
    tflite_quantized_dtypes = []
    tflite_nonquantized_dtypes = []
    for dtype in tflite_dtypes:
        if isinstance(dtype, QuantType):
            tflite_quantized_dtypes.append(dtype)
        else:
            tflite_nonquantized_dtypes.append(dtype)

    nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
        set(tflite_nonquantized_dtypes)
    )
    nonquantized_dtypes = list(nonquantized_dtypes_set)
    quantized_dtypes = tflite_quantized_dtypes

    # populate non-quantized unit test arguments
    for dtype in nonquantized_dtypes:

        excluded_framework_set = set(ALL_FRAMEWORKS)
        if dtype in tf_nonquantized_dtypes:
            excluded_framework_set.remove("tf")
        if dtype in tflite_nonquantized_dtypes:
            excluded_framework_set.remove("tflite")
        excluded_framework_list = list(excluded_framework_set)

        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                dtype,
                excluded_framework_list,
                None,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    # populate quantized unit test arguments
    # must exclude 'tf' and the source dtype must be tf.float32
    for dtype in quantized_dtypes:
        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                tf.float32,
                ["tf"],
                dtype,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    return unit_test_args


def createDynamicOpLists():
    """The templated operators are conv2d-style operators with a number of kernel
    sizes.  Since the operator is unchanged, we generate the range of kernel
    sizes here in this loop and remove the original templates from the list.

    This could be expanded to non-conv2d-style operators in the future."""
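
    # For example, the "conv2d_TEMPLATE" entry is expanded into "conv2d_1x1",
    # "conv2d_3x3" and "conv2d_5x5" entries below, each with its "filter" key
    # set to the corresponding kernel size from KERNELS.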

    # Dynamically create op lists for convolutions with a list of kernel sizes
    KERNELS = [
        [1, 1],
        [3, 3],
        [5, 5],
    ]

    # dim = [D, H, W]
    KERNELS_3D = [
        [1, 1, 1],
        [2, 3, 3],
        [3, 5, 5],
    ]

    TEMPLATE_LIST = [
        "conv2d",
        "conv2d_bias",
        "conv2d_relu",
        "conv2d_relu6",
        "conv2d_relu_n1_to_1",
        "conv2d_tanh",
        "depthwise_conv2d",
        "depthwise_conv2d_bias",
        "transpose_conv2d",
    ]

    TEMPLATE_LIST_CONV3D = [
        "conv3d",
        "conv3d_bias",
    ]

    for t in TEMPLATE_LIST:
        for k in KERNELS:
            testName = "{}_{}x{}".format(t, k[0], k[1])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # The existing 2D operators don't support kernels with more than two
    # dimensions, so the conv3d templates are expanded separately.
    for t in TEMPLATE_LIST_CONV3D:
        for k in KERNELS_3D:
            testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # Delete any templates after having created any dynamic ops
    # This is a two-pass operation because it's bad practice to delete
    # keys from dictionaries while iterating
    keyList = []
    for k in TF_OP_LIST:
        try:
            if TF_OP_LIST[k]["template"]:
                keyList.append(k)
                continue
        except KeyError:
            pass

    for k in keyList:
        del TF_OP_LIST[k]


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--seed", dest="random_seed", default=42, type=int, help="Random seed"
    )
    parser.add_argument(
        "--random-shapes",
        dest="random_shapes",
        default=0,
        type=int,
        help=(
            "Use N random shapes of each rank for generating tests, "
            "seeded with random seed"
        ),
    )
    parser.add_argument(
        "-o",
        "--output-dir",
        dest="output_dir",
        default=".",
        type=str,
        help="Test output directory path prefix",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        dest="quiet",
        default=False,
        action="store_true",
        help="Do not print test names",
    )
    parser.add_argument(
        "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
    )
    parser.add_argument(
        "-m",
        "--tflite-kernel-mode",
        dest="tflite_kernel_mode",
        type=str,
        choices=["reference", "optimized"],
        default="reference",
        help="TFLite interpreter kernel mode",
    )
    parser.add_argument(
        "--num-samples",
        dest="num_samples",
        default=200,
        type=int,
        help="Number of input samples for post-training quantization",
    )
    parser.add_argument(
        "--filter",
        dest="filter",
        default="",
        type=str,
        help="Filter test names by this expression",
    )
    args = parser.parse_args()

    # Turn the filter into a re object if present
    filter = None
    if args.filter != "":
        filter = re.compile(args.filter)

    # Autodetect CPU count
    if args.jobs <= 0:
        args.jobs = os.cpu_count()

    # Disable TF info messages
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    try:
        os.makedirs(args.output_dir)
    except FileExistsError:
        pass

    if args.random_shapes:
        gen_rand_shapes(args)

    # Build dynamic ops
    createDynamicOpLists()

    # Generate the test list and arguments to run_unit_test()
    unit_test_args = []

    for op in TF_OP_LIST:
        generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)

    errors = 0
    for t in unit_test_args:
        if not run_unit_test(*t):
            errors = errors + 1

    if not args.quiet:
        print("\nAll tasks done - with {} errors".format(errors))

    return 1 if errors else 0


if __name__ == "__main__":
    exit(main())