#!/usr/bin/env python3
# Copyright (c) 2020-2023, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import traceback

import numpy as np

# Level | Level for Humans | Level Description
# ------|------------------|------------------------------------
#  0    | DEBUG            | [Default] Print all messages
#  1    | INFO             | Filter out INFO messages
#  2    | WARNING          | Filter out INFO & WARNING messages
#  3    | ERROR            | Filter out all messages
# Filter out TensorFlow debug messages, except for errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
import tensorflow as tf  # noqa: E402
from frameworks.write_test_json import write_test_json  # noqa: E402
from frameworks.arg_gen import ArgGen  # noqa: E402
from frameworks.tensor_gen import TGen  # noqa: E402
from frameworks.tensor_gen import ElemSignedness  # noqa: E402
from frameworks.test_builder import TBuilder  # noqa: E402
from frameworks.test_gen_utils import (  # noqa: E402
    QuantType,
    get_tf_dtype,
    get_shape_str,
)  # noqa: E402
from tensorflow.lite.python.interpreter import OpResolverType  # noqa: E402

# All of the supported frameworks
ALL_FRAMEWORKS = ["tf", "tflite"]

# Lists of different data types
TYPE_F = [tf.float32]
TYPE_I = [tf.int32]
TYPE_FI = [tf.float32, tf.int32]
TYPE_B = [tf.bool]
TYPE_FIB = [tf.float32, tf.int32, tf.bool]
TYPE_H = [tf.float16]
TYPE_FH = [tf.float32, tf.float16]
TYPE_FHI = [tf.float32, tf.float16, tf.int32]
TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]

# The list of operator tests
# Each dictionary entry for an op is a dictionary with the following required members:
#   'operands': tuple (number_of_placeholder_tensors, number_of_constant_tensors)
#   'build_fcn': tuple (Test builder function, Tensor generator function,
#                       Argument generator function)
#   'types': list of TensorFlow types that should be tested for this op
#            OR
#            a dictionary of {'framework_name': [type_list]} for cases where only
#            a subset of the types should be tested in each framework.  This can
#            also be used to restrict an operator to a particular framework.
#
# And optional members:
#   'template': boolean (indicates that this is a templated op which gets further
#               processing in createDynamicOpLists)
#   'bias':     boolean indicating that there is a bias component to be generated
#   'qtypes':   list of QuantType quantized types to generate for this op
#   'rank':     tuple (lowest rank, highest rank). Dimension range of input tensor.
#   'custom_shapes': list of custom shapes for specific operators

TF_OP_LIST = {
    "add": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "sub": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # QuantType.ALL_I16 fails in TFLite conversion
        },
    },
    "mul": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
97 "exp": {
98 "operands": (1, 0),
99 "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
100 "types": TYPE_F,
101 },
102 "rcp": {
103 "operands": (1, 0),
104 "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
105 "types": TYPE_F,
106 },
107 "relu": {
108 "operands": (1, 0),
109 "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
110 "types": {
111 "tf": TYPE_F,
112 "tflite": list(
113 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
114 ),
115 },
116 },
Jerry Ge93912432022-07-22 10:29:13 -0700117 "relu1": {
118 "operands": (1, 0),
119 "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
120 "types": {
121 "tf": [],
122 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
123 },
124 },
Jerry Ge2eea5bf2022-10-11 16:27:05 +0000125 "relu0To1": {
126 "operands": (1, 0),
127 "build_fcn": (TBuilder.Relu0To1, TGen.tgBasic, ArgGen.agNone),
128 "types": {
129 "tf": [],
130 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
131 },
132 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000133 "relu6": {
134 "operands": (1, 0),
135 "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
136 "types": {
137 "tf": TYPE_F,
138 "tflite": list(
139 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
140 ),
141 },
142 },
143 "leaky_relu": {
144 "operands": (1, 0),
145 "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
146 "types": {
147 "tf": TYPE_F,
148 "tflite": list(
149 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
150 ),
151 },
152 },
TatWai Chong41a04fe2022-11-03 21:44:32 +0000153 "prelu": {
154 "operands": (1, 0),
155 "build_fcn": (TBuilder.Prelu, TGen.tgBasic, ArgGen.agNone),
156 "types": {
157 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
158 },
159 },
TatWai Chong473eb382022-08-02 04:21:30 +0000160 "gelu": {
161 "operands": (1, 0),
162 "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
163 "types": {
164 # Need compiler support for tf.Erf.
165 # "tf": TYPE_F,
166 "tflite": list(
167 # Only float32, int8 and uint8 supported currently
168 TYPE_F
169 + [QuantType.ALL_U8, QuantType.ALL_I8]
170 ),
171 },
172 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000173 "concat": {
174 "operands": (2, 0),
175 "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
176 "types": TYPE_FI,
177 },
178 "bitwise_and": {
179 "operands": (2, 0),
180 "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
181 "types": {"tf": TYPE_I}, # Not supported in TF Lite
182 },
183 "bitwise_or": {
184 "operands": (2, 0),
185 "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
186 "types": {"tf": TYPE_I}, # Not supported in TF Lite
187 },
188 "bitwise_not": {
189 "operands": (1, 0),
190 "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
191 "types": {"tf": TYPE_I}, # Not supported in TF Lite
192 },
193 "bitwise_xor": {
194 "operands": (2, 0),
195 "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
196 "types": {"tf": TYPE_I}, # Not supported in TF Lite
197 },
198 "logical_and": {
199 "operands": (2, 0),
200 "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
201 "types": TYPE_B,
202 },
203 "logical_or": {
204 "operands": (2, 0),
205 "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
206 "types": TYPE_B,
207 },
208 "logical_not": {
209 "operands": (1, 0),
210 "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
211 "types": TYPE_B,
212 },
213 "reduce_any": {
214 "operands": (1, 0),
215 "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
216 "types": TYPE_B,
217 },
218 "reduce_all": {
219 "operands": (1, 0),
220 "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
TatWai Chong6ad6d6e2022-11-11 15:28:50 -0800221 "types": TYPE_B,
Jeremy Johnson015c3552022-02-23 12:15:03 +0000222 },
223 "reduce_min": {
224 "operands": (1, 0),
225 "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
226 "types": {
227 "tf": TYPE_FI,
228 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
229 },
230 },
231 "reduce_max": {
232 "operands": (1, 0),
233 "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
234 "types": {
235 "tf": TYPE_FI,
236 "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
237 },
238 },
239 "reduce_sum": {
240 "operands": (1, 0),
241 "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
242 "types": {
243 "tf": TYPE_F,
244 # v2 converter doesn't recognize quantized reduce_sum
245 # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
246 "tflite": TYPE_F,
247 },
248 },
249 "reduce_mean": {
250 "operands": (1, 0),
251 "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
252 "types": {
253 "tf": TYPE_F,
254 "tflite": list(
255 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
256 ),
257 },
258 },
259 "reduce_product": {
260 "operands": (1, 0),
261 "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
262 "types": TYPE_F,
263 },
264 "min": {
265 "operands": (2, 0),
266 "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
267 "types": TYPE_FI,
268 },
269 "max": {
270 "operands": (2, 0),
271 "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
272 "types": TYPE_FI,
273 },
274 "pow": {
275 "operands": (2, 0),
276 "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
277 # Technically, integer is supported, but only for positive exponents.
278 # Needs a random argument generator.
279 "types": TYPE_F,
280 },
281 "abs": {
282 "operands": (1, 0),
283 "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
284 "types": TYPE_F,
285 },
286 "ceil": {
287 "operands": (1, 0),
288 "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
289 "types": TYPE_F,
290 },
291 "floor": {
292 "operands": (1, 0),
293 "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
294 "types": TYPE_F,
295 },
296 "log": {
297 "operands": (1, 0),
298 "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
299 "types": TYPE_F,
300 },
301 "negate": {
302 "operands": (1, 0),
303 "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
304 "types": TYPE_F,
305 },
306 "rsqrt": {
307 "operands": (1, 0),
Jerry Geb1f25012023-03-03 11:33:51 -0800308 "build_fcn": (TBuilder.Rsqrt, TGen.tgBasicPositive, ArgGen.agNone),
309 "types": {
310 "tf": TYPE_F,
311 "tflite": list(TYPE_F + [QuantType.ALL_I8]),
312 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000313 },
TatWai Chongd713a4d2022-11-10 13:54:28 -0800314 "sign": {
315 "operands": (1, 0),
316 "build_fcn": (TBuilder.Sign, TGen.tgBasic, ArgGen.agNone),
317 "types": {
318 "tf": TYPE_F,
319 },
320 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000321 "sigmoid": {
322 "operands": (1, 0),
323 "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
324 "types": {
325 "tf": TYPE_F,
326 "tflite": list(
327 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
328 ),
329 },
330 },
331 "tanh": {
332 "operands": (1, 0),
333 "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
334 "types": {
335 "tf": TYPE_F,
336 "tflite": list(
337 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
338 ),
339 },
340 },
Won Jeon78155c62023-06-10 00:20:04 +0000341 "erf": {
342 "operands": (1, 0),
343 "build_fcn": (TBuilder.Erf, TGen.tgBasic, ArgGen.agNone),
344 "types": {
345 "tf": TYPE_F,
346 },
347 },
Luke Hutton41601862022-12-06 17:29:15 +0000348 "sin": {
349 "operands": (1, 0),
350 "build_fcn": (TBuilder.Sin, TGen.tgBasic, ArgGen.agNone),
Luke Hutton657af862022-12-16 16:30:45 +0000351 "types": TYPE_F,
Luke Hutton41601862022-12-06 17:29:15 +0000352 },
353 "cos": {
354 "operands": (1, 0),
355 "build_fcn": (TBuilder.Cos, TGen.tgBasic, ArgGen.agNone),
Luke Hutton657af862022-12-16 16:30:45 +0000356 "types": TYPE_F,
Luke Hutton41601862022-12-06 17:29:15 +0000357 },
Luke Hutton2138a192022-12-15 11:01:39 +0000358 "atan2": {
359 "operands": (2, 0),
360 "build_fcn": (TBuilder.Atan2, TGen.tgBasic, ArgGen.agNone),
361 "types": {
362 "tflite": TYPE_F,
363 },
364 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000365 "square": {
366 "operands": (1, 0),
367 "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
368 "types": TYPE_F,
369 },
370 "squared_difference": {
371 "operands": (2, 0),
372 "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
373 "types": TYPE_F,
374 },
375 "equal": {
376 "operands": (2, 0),
377 "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
378 "types": TYPE_FI,
379 },
380 "greater_equal": {
381 "operands": (2, 0),
382 "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
383 "types": TYPE_FI,
384 },
385 "greater": {
386 "operands": (2, 0),
387 "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
388 "types": TYPE_FI,
389 },
390 "less": {
391 "operands": (2, 0),
392 "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
393 "types": TYPE_FI,
394 },
395 "less_equal": {
396 "operands": (2, 0),
397 "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
398 "types": TYPE_FI,
399 },
400 "conv2d_TEMPLATE": {
401 "operands": (1, 1),
402 "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
403 "types": {
404 "tf": [tf.float32],
405 "tflite": [
406 tf.float32,
407 QuantType.CONV_U8_U8,
408 QuantType.CONV_I8_I8,
409 QuantType.CONV_I16_I8,
410 ],
411 },
412 "template": True,
413 },
414 "conv2d_relu_TEMPLATE": {
415 "operands": (1, 2),
416 "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
417 "types": {
418 "tf": [tf.float32],
419 "tflite": [
420 tf.float32,
421 QuantType.CONV_U8_U8,
422 QuantType.CONV_I8_I8,
423 QuantType.CONV_I16_I8,
424 ],
425 },
426 "template": True,
427 },
428 "conv2d_relu6_TEMPLATE": {
429 "operands": (1, 2),
430 "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
431 "types": {
432 "tf": [tf.float32],
433 "tflite": [
434 tf.float32,
435 QuantType.CONV_U8_U8,
436 QuantType.CONV_I8_I8,
437 QuantType.CONV_I16_I8,
438 ],
439 },
440 "template": True,
441 },
442 "conv2d_relu_n1_to_1_TEMPLATE": {
443 "operands": (1, 2),
444 "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
445 "types": {
446 "tf": [tf.float32],
447 "tflite": [
448 tf.float32,
449 QuantType.CONV_U8_U8,
450 QuantType.CONV_I8_I8,
451 QuantType.CONV_I16_I8,
452 ],
453 },
454 "template": True,
455 },
    # This test is converted as:
    #   tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
    # TODO: any way to generate tfl.conv2d(){fused_activation_function="TANH"}?
    "conv2d_tanh_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
473 "conv2d_bias_TEMPLATE": {
474 "operands": (1, 2),
475 "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
476 "types": {
477 "tf": [tf.float32],
478 "tflite": [
479 tf.float32,
480 QuantType.CONV_U8_U8,
481 QuantType.CONV_I8_I8,
482 QuantType.CONV_I16_I8,
483 ],
484 },
485 "bias": True,
486 "template": True,
487 },
TatWai Chongfd629052022-07-25 04:01:58 +0000488 "conv3d_TEMPLATE": {
489 "operands": (1, 1),
490 "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
491 "types": {
492 "tf": [tf.float32],
493 "tflite": [
494 tf.float32,
495 QuantType.CONV_U8_U8,
496 QuantType.CONV_I8_I8,
497 # Quantization to 16x8-bit not yet supported by tflite.
498 ],
499 },
500 "template": True,
501 "rank": (1, 5),
502 },
503 "conv3d_bias_TEMPLATE": {
504 "operands": (1, 2),
505 "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
506 "types": {
507 "tf": [tf.float32],
508 "tflite": [
509 tf.float32,
510 QuantType.CONV_U8_U8,
511 QuantType.CONV_I8_I8,
512 # Quantization to 16x8-bit not yet supported by tflite.
513 ],
514 },
515 "bias": True,
516 "template": True,
517 "rank": (1, 5),
518 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000519 "depthwise_conv2d_TEMPLATE": {
520 "operands": (1, 1),
521 "build_fcn": (
522 TBuilder.DepthwiseConv2d,
523 TGen.tgDepthwiseConv2d,
524 ArgGen.agDepthwiseConv2d,
525 ),
526 "types": {
527 "tf": [tf.float32],
528 "tflite": [
529 tf.float32,
530 QuantType.CONV_U8_U8,
531 QuantType.CONV_I8_I8,
532 QuantType.CONV_I16_I8,
533 ],
534 },
535 "template": True,
536 },
537 "depthwise_conv2d_bias_TEMPLATE": {
538 "operands": (1, 2),
539 "build_fcn": (
540 TBuilder.DepthwiseConv2dWithBias,
541 TGen.tgDepthwiseConv2d,
542 ArgGen.agDepthwiseConv2d,
543 ),
544 "types": {
545 "tf": [tf.float32],
546 "tflite": [
547 tf.float32,
548 QuantType.CONV_U8_U8,
549 QuantType.CONV_I8_I8,
550 QuantType.CONV_I16_I8,
551 ],
552 },
553 "bias": True,
554 "template": True,
555 },
556 "transpose_conv2d_TEMPLATE": {
557 "operands": (1, 1),
558 "build_fcn": (
559 TBuilder.TransposeConv2d,
560 TGen.tgTransposeConv2d,
561 ArgGen.agTransposeConv2d,
562 ),
563 "types": {
564 "tf": [tf.float32],
565 "tflite": [
566 tf.float32,
567 QuantType.CONV_U8_U8,
568 QuantType.CONV_I8_I8,
569 QuantType.CONV_I16_I8,
570 ],
571 },
572 "template": True,
573 },
574 "argmax": {
575 "operands": (1, 0),
576 "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
577 "types": {"tf": TYPE_F},
578 },
579 "avg_pool2d": {
580 "operands": (1, 0),
581 "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
582 "types": {
583 "tf": TYPE_F,
584 "tflite": list(
585 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
586 ),
587 },
588 },
589 "max_pool2d": {
590 "operands": (1, 0),
591 "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
592 "types": {
593 "tf": TYPE_F,
594 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
595 # ALL_I16 not supported yet
596 # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
597 # QI16 is missing from MaxPoolOperandAndResultConstraints
598 # If adding QI16 back this test can run through.
599 },
600 },
601 "reshape": {
602 "operands": (1, 0),
603 "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
604 "types": TYPE_FI,
605 },
606 "transpose": {
607 "operands": (1, 0),
608 "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
609 "types": TYPE_FI,
610 },
611 "slice": {
612 "operands": (1, 0),
613 "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
614 "types": TYPE_FI,
615 },
616 "strided_slice": {
617 "operands": (1, 0),
618 "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
619 "types": TYPE_FI,
620 },
621 "select": {
622 "operands": (3, 0),
623 "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
624 "types": TYPE_FI,
625 },
626 "addn": {
627 "operands": (4, 0),
628 "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
629 "types": TYPE_FI,
630 },
631 "concatv2": {
632 "operands": (4, 0),
633 "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
634 "types": TYPE_FI,
635 },
636 "stack": {
637 "operands": (4, 0),
638 "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
639 "types": TYPE_FI,
640 },
641 "unstack": {
642 "operands": (1, 0),
643 "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
644 "types": TYPE_F,
645 },
TatWai Chongf7008da2022-09-09 09:35:40 +0000646 "mirrorpad": {
647 "operands": (1, 0),
648 "build_fcn": (TBuilder.MirrorPad, TGen.tgBasic, ArgGen.agMirrorPad),
649 "types": TYPE_FI,
650 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000651 "pad": {
652 "operands": (1, 0),
653 "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
TatWai Chong2226f902023-02-22 18:38:01 -0800654 "types": {
655 "tf": TYPE_F,
656 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
657 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000658 },
659 "expand_dims": {
660 "operands": (1, 0),
661 "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
662 "types": TYPE_FI,
663 },
664 "shape": {
665 "operands": (1, 0),
666 "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
667 "types": TYPE_FI,
668 },
669 "rank": {
670 "operands": (1, 0),
671 "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
672 "types": TYPE_FI,
673 },
674 "fill": {
675 "operands": (1, 0),
676 "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
677 "types": TYPE_FI,
678 },
679 "elu": {
680 "operands": (1, 0),
681 "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
682 "types": TYPE_F,
683 },
684 "softmax": {
685 "operands": (1, 0),
686 "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
687 "types": {
688 "tf": TYPE_F,
689 "tflite": list(
690 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
691 ),
692 },
693 },
694 "log_softmax": {
695 "operands": (1, 0),
696 "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
697 "types": TYPE_F,
698 },
699 "matmul": {
700 "operands": (2, 0),
701 "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
702 "types": {
703 "tf": TYPE_F,
704 "tflite": list(
705 TYPE_F
706 + [QuantType.ALL_U8, QuantType.ALL_I8]
707 # 16 bits matmul fail to convert
708 ),
709 },
710 },
711 "add_scalar": {
712 "operands": (1, 0),
713 "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
714 "types": TYPE_F,
715 },
716 "add_1d": {
717 "operands": (2, 0),
718 "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
719 "types": TYPE_F,
720 },
721 "split": {
722 "operands": (1, 0),
723 "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
724 "types": TYPE_FI,
725 },
726 "tile": {
727 "operands": (1, 0),
728 "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
729 "types": TYPE_FI,
730 },
731 "reverse": {
732 "operands": (1, 0),
733 "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
734 "types": {"tf": TYPE_FI},
735 },
736 "gather": {
737 "operands": (1, 0),
738 "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
739 "types": TYPE_FI,
740 },
741 "gather_nd": {
742 "operands": (1, 0),
743 "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
744 "types": TYPE_FI,
745 },
746 "scatter_nd": {
747 "operands": (1, 0),
748 "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
749 "types": TYPE_FI,
750 },
751 "space_to_batch": {
752 "operands": (1, 0),
753 "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
754 "types": TYPE_F,
755 },
756 "batch_to_space": {
757 "operands": (1, 0),
758 "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
759 "types": TYPE_F,
760 },
761 "space_to_depth": {
762 "operands": (1, 0),
763 "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
764 "types": TYPE_F,
765 },
766 "depth_to_space": {
767 "operands": (1, 0),
768 "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
769 "types": TYPE_F,
770 },
771 "one_hot": {
772 "operands": (3, 1),
773 "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
774 "types": TYPE_FI,
775 },
776 "fakequant": {
777 "operands": (1, 0),
778 "build_fcn": (
779 TBuilder.Fakequant,
780 TGen.tgBasic,
781 ArgGen.agFakequant,
782 ),
783 "types": {"tf": TYPE_F},
784 },
TatWai Chong0cef07e2023-02-27 13:22:52 -0800785 "resize": {
Jeremy Johnson015c3552022-02-23 12:15:03 +0000786 "operands": (1, 0),
TatWai Chong0cef07e2023-02-27 13:22:52 -0800787 "build_fcn": (TBuilder.Resize, TGen.tgPooling, ArgGen.agResize),
TatWai Chongf7326092022-06-08 12:17:14 -0700788 "types": {
789 "tf": TYPE_F,
790 "tflite": list(
791 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
792 ),
793 },
Jerry Ge5dd5a552023-05-23 22:41:20 +0000794 "custom_shapes": {
795 "custom_shape_only": False,
796 "shape_list": [(3, 1, 1, 7)],
797 },
TatWai Chongf7326092022-06-08 12:17:14 -0700798 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000799 "left_shift": {
800 "operands": (1, 0),
801 "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
802 "types": {"tf": [tf.int32]},
803 },
804 "right_shift": {
805 "operands": (1, 0),
806 "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
807 "types": {
808 "tf": [
809 tf.int32,
810 ]
811 },
812 },
Jerry Ge9e94af82022-10-27 09:57:00 -0700813 "while": {
814 "operands": (1, 0),
815 "build_fcn": (TBuilder.While, TGen.tgBasic, ArgGen.agNone),
816 "types": {
817 "tflite": list(TYPE_F),
818 },
819 },
820 "lstm": {
821 "operands": (1, 0),
822 "build_fcn": (TBuilder.LSTM, TGen.tgRecurrent, ArgGen.agNone),
823 "types": {
824 "tflite": [
825 tf.float32,
826 # tf.int32
827 ]
828 },
829 },
830 "gru": {
831 "operands": (1, 0),
832 "build_fcn": (TBuilder.GRU, TGen.tgRecurrent, ArgGen.agNone),
833 "types": {
834 "tflite": [
835 tf.float32,
836 # tf.int32
837 ]
838 },
839 },
840 "rnn": {
841 "operands": (1, 0),
842 "build_fcn": (TBuilder.RNN, TGen.tgRecurrent, ArgGen.agNone),
843 "types": {
844 "tflite": [
845 tf.float32,
846 ]
847 },
848 },
Luke Hutton261b7b62023-01-10 14:50:31 +0000849 "rfft2d": {
850 "operands": (1, 0),
851 "build_fcn": (TBuilder.RFFT2d, TGen.tgRFFT2d, ArgGen.agRFFT2d),
852 "types": {
853 "tflite": TYPE_F,
854 },
855 },
Luke Hutton714aa602023-02-08 19:45:26 +0000856 "real": {
857 "operands": (1, 0),
858 "build_fcn": (TBuilder.Real, TGen.tgComplexComponents, ArgGen.agNone),
859 "types": {
860 "tflite": [tf.complex64],
861 },
862 },
863 "imag": {
864 "operands": (1, 0),
865 "build_fcn": (TBuilder.Imag, TGen.tgComplexComponents, ArgGen.agNone),
866 "types": {
867 "tflite": [tf.complex64],
868 },
869 },
Tai Lyfe36fa92023-06-01 21:45:12 +0000870 "broadcastto": {
871 "operands": (1, 1),
872 "build_fcn": (TBuilder.BroadcastTo, TGen.tgBroadcastTo, ArgGen.agNone),
873 "types": {
874 "tf": TYPE_FIB,
875 },
876 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000877}

# Shapes to be tested; default can be overwritten
shape_list = [
    (1,),
    (64,),
    (14, 19),
    (13, 21, 3),
    (1, 8, 16),
    (1, 4, 4, 4),
    (1, 8, 4, 17),
    (1, 4, 8, 19),
    (1, 32, 32, 8),
    (1, 7, 7, 9),
    (3, 1, 1, 7),
    (2, 2, 7, 7, 2),
    (1, 4, 8, 21, 17),
    (3, 32, 16, 16, 5),
]


def gen_rand_shapes(args):
    """Overwrite the global shape list with a new list of random shapes"""
    global shape_list

    rng = np.random.default_rng(args.random_seed)

    # Don't let things get too big... cap the maximum volume, but let
    # an individual dimension be 1..47
    max_total_volume = 32 * 32 * 4

    shape_list = []
    # Only iterate over ranks 2, 3, 4, and 5
    for rank in range(2, 6):
        for n in range(args.random_shapes):
            new_shape = rng.integers(1, 48, size=rank)

            # Set the batch dimension on 4D or 5D objects to 1
            if rank == 4 or rank == 5:
                new_shape[0] = 1

            # Limit the total shape volume and throw out any
            # shapes that wouldn't leave at least size=2 in some non-batch dimension
            volume = 1
            skip_shape = False
            for i in range(rank):

                volume *= new_shape[i]

                # Reduce the shape, while it's larger than the maximum volume
                while volume > max_total_volume:
                    new_shape[i] = new_shape[i] // 2
                    volume = volume // 2

                    # Now an untenable dimension size? Skip this one.
                    if new_shape[i] < 1:
                        skip_shape = True

            if not skip_shape:
                shape_list.append(tuple(new_shape))


# Construct, run and save a whole tensorflow tf.function to a protobuf file
# or convert it to .tflite if it's a quantized unit test
def run_unit_test(
    op_name,
    args,
    test_dir,
    curr_shape,
    addl_args,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
):

    try:
        op = TF_OP_LIST[op_name]
        op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

        # Get and seed a random number generator for this test
        rng = np.random.default_rng(seed)

        # returns placeholders=(str: name, np.array: value)
        #         consts=(str: name, np.array: value)
        placeholders, consts = tensor_gen_fcn(op, curr_shape, dtype, rng)

        # if the test doesn't have any placeholders/consts, terminate
        if len(placeholders) == 0 and len(consts) == 0:
            return True

        if not args.quiet:
            print(" {} ".format(test_dir))

        try:
            os.mkdir(test_dir)
        except FileExistsError:
            pass
        const_nodes = [value for name, value in consts]

        num_placeholders = len(placeholders)
        # if the test is quantized, create tensor quantization metadata info for
        # each input tensor, based on the quantized type
        if quantized_inference_dtype:
            is_quantized = True
            # TODO: support INT8 IFM x INT4 weight later
            if quantized_inference_dtype == QuantType.ALL_U8:
                qzero = [128] * num_placeholders
                numpy_dtype = [np.uint8] * num_placeholders
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.ALL_I8:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int8] * num_placeholders
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.ALL_I16:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int16] * num_placeholders
                tflite_inference_dtype = tf.int16
            elif quantized_inference_dtype == QuantType.CONV_U8_U8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [128] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.uint8, np.uint8]
                else:
                    numpy_dtype = [np.uint8, np.uint8, np.int32]
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.CONV_I8_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [0] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.int8, np.int8]
                else:
                    numpy_dtype = [np.int8, np.int8, np.int32]
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.CONV_I16_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                if num_placeholders == 2:
                    qzero = [0, 0]
                    numpy_dtype = [np.int16, np.int8]
                else:
                    qzero = [0, 0, 0]
                    numpy_dtype = [
                        np.int16,
                        np.int8,
                        np.int64,
                    ]  # np.int64 to represent the 40-bit accumulator
                tflite_inference_dtype = tf.int16
            else:
                raise Exception(
                    "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
                )

        else:
            is_quantized = False

        tf_model_filename = None
        tf_result_npy_filename = None
        tf_result_name = None

        tflite_model_filename = None
        tflite_result_npy_filename = None
        tflite_result_name = None

        placeholder_names = []
        placeholder_vals = []
        placeholder_signatures = ()
        placeholder_npy_filenames = []
        placeholder_shapes = []

        for idx, (name, val) in enumerate(placeholders):
            placeholder_names.append(name)
            placeholder_signatures = placeholder_signatures + (
                tf.TensorSpec(shape=val.shape, dtype=val.dtype, name=name),
            )
            placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
            placeholder_shapes.append(val.shape)

        # Get the test builder class
        fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
        concrete_function = tf.function(input_signature=placeholder_signatures)(
            fcn_node.eval
        ).get_concrete_function()

        if is_quantized:

            assert dtype is tf.float32, "quantized test must come from float32 graph"

            # 1. Quantize the float placeholder npy to feed the graph
            for idx, (name, val) in enumerate(placeholders):

                # we use np.amin()/np.amax() to determine the dynamic range
                # for the quantized test
                zeropoint = 0
                scale = 1.0
                if numpy_dtype[idx] != np.int64:
                    qmin = np.iinfo(numpy_dtype[idx]).min
                    qmax = np.iinfo(numpy_dtype[idx]).max
                    num_bits = np.iinfo(numpy_dtype[idx]).bits
                # the 40-bit value is represented as np.int64
                else:
                    num_bits = 40
                    qmin = -(1 << num_bits)
                    qmax = (1 << num_bits) - 1

                min_val = np.amin(val)
                max_val = np.amax(val)

                # for a single-value tensor, we set scale equal to abs(value)
                # and fix zeropoint to 128:
                # if val > 0, it'll be represented as 129,
                #   where val = (129 - 128) * val
                # if val < 0, it'll be represented as 127,
                #   where val = (127 - 128) * (-val)
                # if val == 0, it'll be represented as 128, with range [-128.0, 128.0],
                #   and let quantized 1 represent the value
                # also adjust the effective min/max accordingly
                if max_val == min_val:
                    if max_val != 0:
                        scale = abs(max_val)
                    else:
                        scale = 1.0
                    min_val = float(qmin - qzero[idx]) * scale
                    max_val = float(qmax - qzero[idx]) * scale
                else:
                    scale = (max_val - min_val) / float(qmax - qmin)
                    zeropoint = int(round((-min_val) / scale)) + qmin
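                    # Worked example (illustrative values): for int8 data with
                    # min_val = -1.0 and max_val = 1.0, qmin = -128 and qmax = 127,
                    # so scale = 2.0 / 255 ~= 0.00784 and
                    # zeropoint = round(1.0 / scale) + (-128) = 128 - 128 = 0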

                # run through tf.fakequant first to ensure the quantization
                # error is aligned
                fakequant_val = tf.quantization.fake_quant_with_min_max_args(
                    val,
                    min=min_val,
                    max=max_val,
                    num_bits=num_bits,
                    name="gen_quant_npy",
                )

                quant_val = np.round(fakequant_val / scale).astype(np.int32) + zeropoint

                # in a few unit tests after the May 2020 TF hash, this quantized
                # value exceeds the [0, 255] range for some reason
                saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])

                # save all quantized tensors as np.int32,
                # since the TOSA numpy C++ API only supports int32
                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]),
                    saved_val.astype(np.int32),
                    False,
                )

                placeholder_vals.append(tf.convert_to_tensor(saved_val))

            # 2. Convert the model to a quantized TFLite flatbuffer
            module = tf.Module()
            converter = tf.lite.TFLiteConverter.from_concrete_functions(
                [concrete_function], module
            )
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.experimental_new_converter = True

            # use the MLIR-based post-quantizer
            converter.experimental_new_quantizer = True

            flag = (
                tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8  # noqa: E501
            )
            if tflite_inference_dtype == tf.int16:
                converter.target_spec.supported_ops = [flag]

            def input_stats():
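                # Representative dataset generator: yields num_samples sets of
                # random float32 inputs so the converter can calibrate the
                # quantization ranges during post-training quantization.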
                # Rsqrt can only handle positive numbers
                elem_signedness = ElemSignedness.ALL_RANGE
                if op_name == "rsqrt":
                    elem_signedness = ElemSignedness.POSITIVE

                for i in range(0, args.num_samples):
                    a = [
                        TGen.getRand(shape, tf.float32, rng, elem_signedness)
                        for shape in placeholder_shapes
                    ]
                    yield a

            converter.representative_dataset = input_stats
            converter.inference_input_type = tflite_inference_dtype
            converter.inference_output_type = tflite_inference_dtype

            tflite_model = converter.convert()

            tflite_model_filename = "model.tflite"

            # Write out converted model to disk
            with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                f.write(tflite_model)

        else:  # is_quantized is False

            # 1. Save out the numpy arrays directly
            for idx, (name, val) in enumerate(placeholders):
                placeholder_vals.append(tf.convert_to_tensor(val))

                # Complex tensors are expected to be represented by a
                # single floating point tensor of shape [?, ..., ?, 2].
                if val.dtype == np.complex64:
                    val_shape = val.shape + (2,)
                    val = val.view(np.float32)
                    val = val.reshape(val_shape)

                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
                )

            # 2.a Save out a .pb if the framework list includes tensorflow
            if "tf" not in excluded_framework_list:
                # Write out graph as protobuf to disk
                tf_model_filename = "model.pb"
                tf.io.write_graph(
                    concrete_function.graph, test_dir, tf_model_filename, True
                )

            # 2.b Save out a .tflite if the framework list includes tflite
            if "tflite" not in excluded_framework_list:
                # Convert the model to a TFLite flatbuffer
                module = tf.Module()
                converter = tf.lite.TFLiteConverter.from_concrete_functions(
                    [concrete_function], module
                )

                converter.experimental_new_converter = True

                # Even if it's a non-quantized int32 test, this needs to be set
                # to tf.float32
                converter.inference_input_type = tf.float32
                converter.inference_output_type = tf.float32
                tflite_model = converter.convert()

                # Write out converted model to disk
                tflite_model_filename = "model.tflite"
                with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                    f.write(tflite_model)

        # Get TF reference result if .pb is specified
        if tf_model_filename:
            tf_result_npy_filename = "tf_result.npy"
            tf_result = concrete_function(*placeholder_vals)
            np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)

            tf_result_name = result_name

        # Get TFLite inference result if .tflite is specified
        if tflite_model_filename:
            tflite_result_npy_filename = "tflite_result.npy"

            ops_with_optimized_only_kernel = ["elu", "ceil", "gather", "rfft2d"]
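            # These ops only have optimized TFLite kernels, so they are run on
            # the default (optimized) op resolver even in reference kernel mode.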

            if args.tflite_kernel_mode == "optimized" or (
                op_name in ops_with_optimized_only_kernel
            ):
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename)
                )
            elif args.tflite_kernel_mode == "reference":
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename),
                    experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
                )
            else:
                assert 0, "unknown tflite interpreter mode {}".format(
                    args.tflite_kernel_mode
                )
            interpreter.allocate_tensors()

            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()

            assert len(input_details) == len(
                placeholder_vals
            ), "number of placeholders mismatch"

            for idx, val in enumerate(placeholder_vals):
                interpreter.set_tensor(input_details[idx]["index"], val.numpy())

            interpreter.invoke()
            tflite_result = interpreter.get_tensor(output_details[0]["index"])

            np.save(
                os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
            )

            # The result tensor name changes after converting to a TFLite flatbuffer,
            # so overwrite the information from the TFLite model directly.
            # Assume a single result tensor for now.
            tflite_result_name = output_details[0]["name"]

        # Write out test descriptor
        write_test_json(
            filename=os.path.join(test_dir, "test.json"),
            tf_model_filename=tf_model_filename,
            tf_result_npy_filename=tf_result_npy_filename,
            tf_result_name=tf_result_name,
            tflite_model_filename=tflite_model_filename,
            tflite_result_npy_filename=tflite_result_npy_filename,
            tflite_result_name=tflite_result_name,
            ifm_name=placeholder_names,
            ifm_file=placeholder_npy_filenames,
            ifm_shape=placeholder_shapes,
            framework_exclusions=excluded_framework_list,
            quantized=is_quantized,
        )
    except Exception as e:
        msg = "Error running task: {}".format(e)
        print(msg)
        print(
            "".join(
                traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
            )
        )
        return False
    return True


def build_const_net(
    args,
    curr_shape,
    op_name,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
    rng,
    filter,
    unit_test_args,
):

    if quantized_inference_dtype:
        quant_dtype = get_tf_dtype(quantized_inference_dtype)
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
    else:
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
    test_dir = os.path.join(args.output_dir, test_dir)

    # If the operator has an additional function to generate arguments, call it
    # here and iterate through the argument list that it generates
    op = TF_OP_LIST[op_name]
    op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

    try:
        rank_lo, rank_hi = op["rank"]
    except KeyError:
        # Default the testing rank range to (1, 4)
        rank_lo = 1
        rank_hi = 4

    if len(curr_shape) not in range(rank_lo, rank_hi + 1):
        return

    addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
    for desc, addl_args in addl_args_tuple:
        # Only filter on the full test_name, not the output directory
        _, test_name = os.path.split(test_dir + desc)
        if not filter or filter.search(test_name):
            unit_test_args.append(
                [
                    op_name,
                    args,
                    test_dir + desc,
                    curr_shape,
                    addl_args,
                    dtype,
                    excluded_framework_list,
                    quantized_inference_dtype,
                    result_name,
                    seed,
                ]
            )


# Python's hash() is not reproducible across runs, so create our own hash
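# (a simple per-character shift-and-xor hash, CRC-like in style, so the same
# op name always maps to the same seed value on every run)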
def op_name_hash(op_name):
    result = 0xDEADBEEF
    for ch in op_name:
        if result & 1:
            result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
        else:
            result = (ord(ch) << 24) ^ (result >> 1)

    return result


def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):

    if not args.quiet:
        print("Generating tests for {}".format(op_name))

    op = TF_OP_LIST[op_name]

    # Seed the RNG so that we get the same random tests for each test each time
    # If the number of tests for a given generation function changes, the tests
    # for that operator may also change accordingly, but this will at least keep
    # down churn across operators.

    bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
        np.int32
    ).max
    rng = np.random.default_rng(bounded_hash_val)

    # op["types"] is either a plain list, or a dictionary with 'tf' and 'tflite'
    # as keys and the data types we want to test under each framework as values

    if isinstance(op["types"], dict):
        try:
            tf_dtypes = op["types"]["tf"]
        except KeyError:
            tf_dtypes = []
        try:
            tflite_dtypes = op["types"]["tflite"]
        except KeyError:
            tflite_dtypes = []
    elif isinstance(op["types"], list):
        tf_dtypes = op["types"]
        tflite_dtypes = op["types"]

    tf_nonquantized_dtypes = tf_dtypes  # tf doesn't support quantized data types
    tflite_quantized_dtypes = []
    tflite_nonquantized_dtypes = []
    for dtype in tflite_dtypes:
        if isinstance(dtype, QuantType):
            tflite_quantized_dtypes.append(dtype)
        else:
            tflite_nonquantized_dtypes.append(dtype)

    nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
        set(tflite_nonquantized_dtypes)
    )
    nonquantized_dtypes = list(nonquantized_dtypes_set)
    quantized_dtypes = tflite_quantized_dtypes

    # append custom_shapes, or replace shape_list with custom_shapes
    try:
        custom_shapes = op["custom_shapes"]
        if custom_shapes["custom_shape_only"]:
            shape_list = custom_shapes["shape_list"]
        else:
            shape_list = shape_list.copy()
            # extend (not append) so each custom shape is added as its own tuple
            shape_list.extend(custom_shapes["shape_list"])
    except KeyError:
        pass

    # populate non-quantized unit test arguments
    for dtype in nonquantized_dtypes:

        excluded_framework_set = set(ALL_FRAMEWORKS)
        if dtype in tf_nonquantized_dtypes:
            excluded_framework_set.remove("tf")
        if dtype in tflite_nonquantized_dtypes:
            excluded_framework_set.remove("tflite")
        excluded_framework_list = list(excluded_framework_set)

        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                dtype,
                excluded_framework_list,
                None,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    # populate quantized unit test arguments;
    # 'tf' must be excluded, and the source dtype must be tf.float32
    for dtype in quantized_dtypes:
        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                tf.float32,
                ["tf"],
                dtype,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    return unit_test_args


def createDynamicOpLists():
    """The templated operators are conv2d-style operators with a number of kernel
    sizes.  Since the operator is unchanged, we generate the range of kernel
    sizes here in this loop and remove the original templates from the list.

    This could be expanded to non-conv2d-style operators in the future."""
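    # For example, "conv2d_TEMPLATE" expands into "conv2d_1x1", "conv2d_3x3"
    # and "conv2d_5x5" entries, each with "filter" set to the matching kernel.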

    # Dynamically create op lists for convolutions with a list of kernel sizes
    KERNELS = [
        [1, 1],
        [3, 3],
        [5, 5],
    ]

    # dim = [D, H, W]
    KERNELS_3D = [
        [1, 1, 1],
        [2, 3, 3],
        [3, 5, 5],
    ]

    TEMPLATE_LIST = [
        "conv2d",
        "conv2d_bias",
        "conv2d_relu",
        "conv2d_relu6",
        "conv2d_relu_n1_to_1",
        "conv2d_tanh",
        "depthwise_conv2d",
        "depthwise_conv2d_bias",
        "transpose_conv2d",
    ]

    TEMPLATE_LIST_CONV3D = [
        "conv3d",
        "conv3d_bias",
    ]

    for t in TEMPLATE_LIST:
        for k in KERNELS:
            testName = "{}_{}x{}".format(t, k[0], k[1])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # The existing 2D operators don't support kernels with more than two
    # dimensions, so the conv3d templates get their own kernel list.
    for t in TEMPLATE_LIST_CONV3D:
        for k in KERNELS_3D:
            testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # Delete any templates after having created any dynamic ops
    # This is a two-pass operation because it's bad practice to delete
    # keys from dictionaries while iterating
    keyList = []
    for k in TF_OP_LIST:
        try:
            if TF_OP_LIST[k]["template"]:
                keyList.append(k)
                continue
        except KeyError:
            pass

    for k in keyList:
        del TF_OP_LIST[k]


1557def main():
1558 parser = argparse.ArgumentParser()
1559 parser.add_argument(
1560 "--seed", dest="random_seed", default=42, type=int, help="Random seed"
1561 )
1562 parser.add_argument(
1563 "--random-shapes",
1564 dest="random_shapes",
1565 default=0,
1566 type=int,
1567 help=(
1568 "Use N random shapes of each rank for generating tests,"
1569 "seeded with random seed"
1570 ),
1571 )
1572 parser.add_argument(
1573 "-o",
1574 "--output-dir",
1575 dest="output_dir",
1576 default=".",
1577 type=str,
1578 help="Test output directory path prefix",
1579 )
1580 parser.add_argument(
1581 "-q",
1582 "--quiet",
1583 dest="quiet",
1584 default=False,
1585 action="store_true",
1586 help="Do not print test names",
1587 )
1588 parser.add_argument(
1589 "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
1590 )
1591 parser.add_argument(
1592 "-m",
1593 "--tflite-kernel-mode",
1594 dest="tflite_kernel_mode",
1595 type=str,
1596 choices=["reference", "optimized"],
1597 default="reference",
1598 help="TFLite interpreter kernel mode",
1599 )
1600 parser.add_argument(
1601 "--num-samples",
1602 dest="num_samples",
1603 default=200,
1604 type=int,
1605 help="Number of input samples for post-training quantization",
1606 )
1607 parser.add_argument(
1608 "--filter",
1609 dest="filter",
1610 default="",
1611 type=str,
1612 help="Filter test names by this expression",
1613 )
1614 args = parser.parse_args()
1615
    # Turn the filter into a re object if present
    filter = None
    if args.filter != "":
        filter = re.compile(args.filter)

    # Autodetect CPU count
    if args.jobs <= 0:
        args.jobs = os.cpu_count()

    # Disable TF info messages
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    try:
        os.makedirs(args.output_dir)
    except FileExistsError:
        pass

    if args.random_shapes:
        gen_rand_shapes(args)

    # Build dynamic ops
    createDynamicOpLists()

    # Generate the test list and arguments to run_unit_test()
    unit_test_args = []

    for op in TF_OP_LIST:
        generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)

    errors = 0
    for t in unit_test_args:
        if not run_unit_test(*t):
            errors = errors + 1

    if not args.quiet:
        print("\nAll tasks done - with {} errors".format(errors))

    return 1 if errors else 0


if __name__ == "__main__":
    exit(main())