#!/usr/bin/env python3
# Copyright (c) 2020-2023, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import traceback

import numpy as np

# Level | Level for Humans | Level Description
# ------|------------------|------------------------------------
#  0    | DEBUG            | [Default] Print all messages
#  1    | INFO             | Filter out INFO messages
#  2    | WARNING          | Filter out INFO & WARNING messages
#  3    | ERROR            | Filter out all messages
# Filter out TensorFlow debug messages, keeping errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
import tensorflow as tf  # noqa: E402
from frameworks.write_test_json import write_test_json  # noqa: E402
from frameworks.arg_gen import ArgGen  # noqa: E402
from frameworks.tensor_gen import TGen  # noqa: E402
from frameworks.tensor_gen import ElemSignedness  # noqa: E402
from frameworks.test_builder import TBuilder  # noqa: E402
from frameworks.test_gen_utils import (  # noqa: E402
    QuantType,
    get_tf_dtype,
    get_shape_str,
)  # noqa: E402
from tensorflow.lite.python.interpreter import OpResolverType  # noqa: E402

# All of the supported frameworks
ALL_FRAMEWORKS = ["tf", "tflite"]

# Lists of different data types
TYPE_F = [tf.float32]
TYPE_I = [tf.int32]
TYPE_FI = [tf.float32, tf.int32]
TYPE_B = [tf.bool]
TYPE_FIB = [tf.float32, tf.int32, tf.bool]
TYPE_H = [tf.float16]
TYPE_FH = [tf.float32, tf.float16]
TYPE_FHI = [tf.float32, tf.float16, tf.int32]
TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]

# The list of operator tests
# Each dictionary entry for an op is a dictionary with the following required members:
#   'operands': tuple (number_of_placeholder_tensors, number_of_constant_tensors)
#   'build_fcn': tuple (Test builder function, Tensor generator function,
#                       Argument generator function)
#   'types': list of TensorFlow types that should be tested for this op
#            OR
#            a dictionary of {'framework_name': [type_list]} for cases where only
#            a subset of the types should be tested in each framework.  This can
#            also be used to restrict an operator to a particular framework.
#
# And optional members:
#   'template': boolean (indicates that this is a templated op which gets further
#               processing in createDynamicOpLists)
#   'bias': boolean indicating that there is a bias component to be generated
#   'qtypes': List of QuantType quantized types to generate for this op
#   'rank': tuple (lowest rank, highest rank). Dimension range of input tensor.
#   'custom_shapes': List of custom shapes for specific operators
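#
# For example (illustrative annotation of the real "add" entry below):
#   "add": {
#       "operands": (2, 0),    # two placeholder inputs, no constant inputs
#       "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
#       "types": {...},        # per-framework dtype lists
#   }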

TF_OP_LIST = {
    "add": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "sub": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # QuantType.ALL_I16 fails in TFLite conversion
        },
    },
    "mul": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
97 "exp": {
98 "operands": (1, 0),
99 "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
100 "types": TYPE_F,
101 },
102 "rcp": {
103 "operands": (1, 0),
104 "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
105 "types": TYPE_F,
106 },
107 "relu": {
108 "operands": (1, 0),
109 "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
110 "types": {
111 "tf": TYPE_F,
112 "tflite": list(
113 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
114 ),
115 },
116 },
Jerry Ge93912432022-07-22 10:29:13 -0700117 "relu1": {
118 "operands": (1, 0),
119 "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
120 "types": {
121 "tf": [],
122 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
123 },
124 },
Jerry Ge2eea5bf2022-10-11 16:27:05 +0000125 "relu0To1": {
126 "operands": (1, 0),
127 "build_fcn": (TBuilder.Relu0To1, TGen.tgBasic, ArgGen.agNone),
128 "types": {
129 "tf": [],
130 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
131 },
132 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000133 "relu6": {
134 "operands": (1, 0),
135 "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
136 "types": {
137 "tf": TYPE_F,
138 "tflite": list(
139 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
140 ),
141 },
142 },
143 "leaky_relu": {
144 "operands": (1, 0),
145 "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
146 "types": {
147 "tf": TYPE_F,
148 "tflite": list(
149 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
150 ),
151 },
152 },
TatWai Chong41a04fe2022-11-03 21:44:32 +0000153 "prelu": {
154 "operands": (1, 0),
155 "build_fcn": (TBuilder.Prelu, TGen.tgBasic, ArgGen.agNone),
156 "types": {
157 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
158 },
159 },
TatWai Chong473eb382022-08-02 04:21:30 +0000160 "gelu": {
161 "operands": (1, 0),
162 "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
163 "types": {
164 # Need compiler support for tf.Erf.
165 # "tf": TYPE_F,
166 "tflite": list(
167 # Only float32, int8 and uint8 supported currently
168 TYPE_F
169 + [QuantType.ALL_U8, QuantType.ALL_I8]
170 ),
171 },
172 },
    "concat": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
        "types": TYPE_FI,
    },
    "bitwise_and": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_or": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_not": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_xor": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "logical_and": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "logical_or": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "logical_not": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "reduce_any": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_B,
    },
    "reduce_all": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_B,
    },
    "reduce_min": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "reduce_max": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "reduce_sum": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_F,
            # v2 converter doesn't recognize quantized reduce_sum
            # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
            "tflite": TYPE_F,
        },
    },
    "reduce_mean": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "reduce_product": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_F,
    },
    "min": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "max": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "pow": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
        # Technically, integer is supported, but only for positive exponents.
        # Needs a random argument generator.
        "types": TYPE_F,
    },
    "abs": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "ceil": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "floor": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "log": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "negate": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "rsqrt": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rsqrt, TGen.tgBasicPositive, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(TYPE_F + [QuantType.ALL_I8]),
        },
    },
    "sign": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sign, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
        },
    },
    "sigmoid": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "tanh": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "sin": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sin, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "cos": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Cos, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "atan2": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Atan2, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tflite": TYPE_F,
        },
    },
    "square": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "squared_difference": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_F,
    },
    "equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "greater_equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "greater": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "less": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "less_equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
393 "conv2d_TEMPLATE": {
394 "operands": (1, 1),
395 "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
396 "types": {
397 "tf": [tf.float32],
398 "tflite": [
399 tf.float32,
400 QuantType.CONV_U8_U8,
401 QuantType.CONV_I8_I8,
402 QuantType.CONV_I16_I8,
403 ],
404 },
405 "template": True,
406 },
407 "conv2d_relu_TEMPLATE": {
408 "operands": (1, 2),
409 "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
410 "types": {
411 "tf": [tf.float32],
412 "tflite": [
413 tf.float32,
414 QuantType.CONV_U8_U8,
415 QuantType.CONV_I8_I8,
416 QuantType.CONV_I16_I8,
417 ],
418 },
419 "template": True,
420 },
421 "conv2d_relu6_TEMPLATE": {
422 "operands": (1, 2),
423 "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
424 "types": {
425 "tf": [tf.float32],
426 "tflite": [
427 tf.float32,
428 QuantType.CONV_U8_U8,
429 QuantType.CONV_I8_I8,
430 QuantType.CONV_I16_I8,
431 ],
432 },
433 "template": True,
434 },
435 "conv2d_relu_n1_to_1_TEMPLATE": {
436 "operands": (1, 2),
437 "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
438 "types": {
439 "tf": [tf.float32],
440 "tflite": [
441 tf.float32,
442 QuantType.CONV_U8_U8,
443 QuantType.CONV_I8_I8,
444 QuantType.CONV_I16_I8,
445 ],
446 },
447 "template": True,
448 },
449 # This test is converted as:
450 # tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
451 # TODO: anyway to generate tfl.conv2d(){fused_activation_function="TANH"}?
452 "conv2d_tanh_TEMPLATE": {
453 "operands": (1, 2),
454 "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
455 "types": {
456 "tf": [tf.float32],
457 "tflite": [
458 tf.float32,
459 QuantType.CONV_U8_U8,
460 QuantType.CONV_I8_I8,
461 QuantType.CONV_I16_I8,
462 ],
463 },
464 "template": True,
465 },
466 "conv2d_bias_TEMPLATE": {
467 "operands": (1, 2),
468 "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
469 "types": {
470 "tf": [tf.float32],
471 "tflite": [
472 tf.float32,
473 QuantType.CONV_U8_U8,
474 QuantType.CONV_I8_I8,
475 QuantType.CONV_I16_I8,
476 ],
477 },
478 "bias": True,
479 "template": True,
480 },
TatWai Chongfd629052022-07-25 04:01:58 +0000481 "conv3d_TEMPLATE": {
482 "operands": (1, 1),
483 "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
484 "types": {
485 "tf": [tf.float32],
486 "tflite": [
487 tf.float32,
488 QuantType.CONV_U8_U8,
489 QuantType.CONV_I8_I8,
490 # Quantization to 16x8-bit not yet supported by tflite.
491 ],
492 },
493 "template": True,
494 "rank": (1, 5),
495 },
496 "conv3d_bias_TEMPLATE": {
497 "operands": (1, 2),
498 "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
499 "types": {
500 "tf": [tf.float32],
501 "tflite": [
502 tf.float32,
503 QuantType.CONV_U8_U8,
504 QuantType.CONV_I8_I8,
505 # Quantization to 16x8-bit not yet supported by tflite.
506 ],
507 },
508 "bias": True,
509 "template": True,
510 "rank": (1, 5),
511 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000512 "depthwise_conv2d_TEMPLATE": {
513 "operands": (1, 1),
514 "build_fcn": (
515 TBuilder.DepthwiseConv2d,
516 TGen.tgDepthwiseConv2d,
517 ArgGen.agDepthwiseConv2d,
518 ),
519 "types": {
520 "tf": [tf.float32],
521 "tflite": [
522 tf.float32,
523 QuantType.CONV_U8_U8,
524 QuantType.CONV_I8_I8,
525 QuantType.CONV_I16_I8,
526 ],
527 },
528 "template": True,
529 },
530 "depthwise_conv2d_bias_TEMPLATE": {
531 "operands": (1, 2),
532 "build_fcn": (
533 TBuilder.DepthwiseConv2dWithBias,
534 TGen.tgDepthwiseConv2d,
535 ArgGen.agDepthwiseConv2d,
536 ),
537 "types": {
538 "tf": [tf.float32],
539 "tflite": [
540 tf.float32,
541 QuantType.CONV_U8_U8,
542 QuantType.CONV_I8_I8,
543 QuantType.CONV_I16_I8,
544 ],
545 },
546 "bias": True,
547 "template": True,
548 },
549 "transpose_conv2d_TEMPLATE": {
550 "operands": (1, 1),
551 "build_fcn": (
552 TBuilder.TransposeConv2d,
553 TGen.tgTransposeConv2d,
554 ArgGen.agTransposeConv2d,
555 ),
556 "types": {
557 "tf": [tf.float32],
558 "tflite": [
559 tf.float32,
560 QuantType.CONV_U8_U8,
561 QuantType.CONV_I8_I8,
562 QuantType.CONV_I16_I8,
563 ],
564 },
565 "template": True,
566 },
567 "argmax": {
568 "operands": (1, 0),
569 "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
570 "types": {"tf": TYPE_F},
571 },
572 "avg_pool2d": {
573 "operands": (1, 0),
574 "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
575 "types": {
576 "tf": TYPE_F,
577 "tflite": list(
578 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
579 ),
580 },
581 },
582 "max_pool2d": {
583 "operands": (1, 0),
584 "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
585 "types": {
586 "tf": TYPE_F,
587 "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
588 # ALL_I16 not supported yet
589 # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
590 # QI16 is missing from MaxPoolOperandAndResultConstraints
591 # If adding QI16 back this test can run through.
592 },
593 },
594 "reshape": {
595 "operands": (1, 0),
596 "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
597 "types": TYPE_FI,
598 },
599 "transpose": {
600 "operands": (1, 0),
601 "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
602 "types": TYPE_FI,
603 },
604 "slice": {
605 "operands": (1, 0),
606 "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
607 "types": TYPE_FI,
608 },
609 "strided_slice": {
610 "operands": (1, 0),
611 "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
612 "types": TYPE_FI,
613 },
614 "select": {
615 "operands": (3, 0),
616 "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
617 "types": TYPE_FI,
618 },
619 "addn": {
620 "operands": (4, 0),
621 "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
622 "types": TYPE_FI,
623 },
624 "concatv2": {
625 "operands": (4, 0),
626 "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
627 "types": TYPE_FI,
628 },
629 "stack": {
630 "operands": (4, 0),
631 "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
632 "types": TYPE_FI,
633 },
634 "unstack": {
635 "operands": (1, 0),
636 "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
637 "types": TYPE_F,
638 },
TatWai Chongf7008da2022-09-09 09:35:40 +0000639 "mirrorpad": {
640 "operands": (1, 0),
641 "build_fcn": (TBuilder.MirrorPad, TGen.tgBasic, ArgGen.agMirrorPad),
642 "types": TYPE_FI,
643 },
    "pad": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
        "types": {
            "tf": TYPE_F,
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "expand_dims": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
        "types": TYPE_FI,
    },
    "shape": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "rank": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "fill": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
        "types": TYPE_FI,
    },
    "elu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "softmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "log_softmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "matmul": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F
                + [QuantType.ALL_U8, QuantType.ALL_I8]
                # 16-bit matmul fails to convert
            ),
        },
    },
704 "add_scalar": {
705 "operands": (1, 0),
706 "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
707 "types": TYPE_F,
708 },
709 "add_1d": {
710 "operands": (2, 0),
711 "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
712 "types": TYPE_F,
713 },
714 "split": {
715 "operands": (1, 0),
716 "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
717 "types": TYPE_FI,
718 },
719 "tile": {
720 "operands": (1, 0),
721 "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
722 "types": TYPE_FI,
723 },
724 "reverse": {
725 "operands": (1, 0),
726 "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
727 "types": {"tf": TYPE_FI},
728 },
729 "gather": {
730 "operands": (1, 0),
731 "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
732 "types": TYPE_FI,
733 },
734 "gather_nd": {
735 "operands": (1, 0),
736 "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
737 "types": TYPE_FI,
738 },
739 "scatter_nd": {
740 "operands": (1, 0),
741 "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
742 "types": TYPE_FI,
743 },
744 "space_to_batch": {
745 "operands": (1, 0),
746 "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
747 "types": TYPE_F,
748 },
749 "batch_to_space": {
750 "operands": (1, 0),
751 "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
752 "types": TYPE_F,
753 },
754 "space_to_depth": {
755 "operands": (1, 0),
756 "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
757 "types": TYPE_F,
758 },
759 "depth_to_space": {
760 "operands": (1, 0),
761 "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
762 "types": TYPE_F,
763 },
764 "one_hot": {
765 "operands": (3, 1),
766 "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
767 "types": TYPE_FI,
768 },
769 "fakequant": {
770 "operands": (1, 0),
771 "build_fcn": (
772 TBuilder.Fakequant,
773 TGen.tgBasic,
774 ArgGen.agFakequant,
775 ),
776 "types": {"tf": TYPE_F},
777 },
TatWai Chong0cef07e2023-02-27 13:22:52 -0800778 "resize": {
Jeremy Johnson015c3552022-02-23 12:15:03 +0000779 "operands": (1, 0),
TatWai Chong0cef07e2023-02-27 13:22:52 -0800780 "build_fcn": (TBuilder.Resize, TGen.tgPooling, ArgGen.agResize),
TatWai Chongf7326092022-06-08 12:17:14 -0700781 "types": {
782 "tf": TYPE_F,
783 "tflite": list(
784 TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
785 ),
786 },
Jerry Ge5dd5a552023-05-23 22:41:20 +0000787 "custom_shapes": {
788 "custom_shape_only": False,
789 "shape_list": [(3, 1, 1, 7)],
790 },
TatWai Chongf7326092022-06-08 12:17:14 -0700791 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000792 "left_shift": {
793 "operands": (1, 0),
794 "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
795 "types": {"tf": [tf.int32]},
796 },
797 "right_shift": {
798 "operands": (1, 0),
799 "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
800 "types": {
801 "tf": [
802 tf.int32,
803 ]
804 },
805 },
Jerry Ge9e94af82022-10-27 09:57:00 -0700806 "while": {
807 "operands": (1, 0),
808 "build_fcn": (TBuilder.While, TGen.tgBasic, ArgGen.agNone),
809 "types": {
810 "tflite": list(TYPE_F),
811 },
812 },
813 "lstm": {
814 "operands": (1, 0),
815 "build_fcn": (TBuilder.LSTM, TGen.tgRecurrent, ArgGen.agNone),
816 "types": {
817 "tflite": [
818 tf.float32,
819 # tf.int32
820 ]
821 },
822 },
823 "gru": {
824 "operands": (1, 0),
825 "build_fcn": (TBuilder.GRU, TGen.tgRecurrent, ArgGen.agNone),
826 "types": {
827 "tflite": [
828 tf.float32,
829 # tf.int32
830 ]
831 },
832 },
833 "rnn": {
834 "operands": (1, 0),
835 "build_fcn": (TBuilder.RNN, TGen.tgRecurrent, ArgGen.agNone),
836 "types": {
837 "tflite": [
838 tf.float32,
839 ]
840 },
841 },
Luke Hutton261b7b62023-01-10 14:50:31 +0000842 "rfft2d": {
843 "operands": (1, 0),
844 "build_fcn": (TBuilder.RFFT2d, TGen.tgRFFT2d, ArgGen.agRFFT2d),
845 "types": {
846 "tflite": TYPE_F,
847 },
848 },
Luke Hutton714aa602023-02-08 19:45:26 +0000849 "real": {
850 "operands": (1, 0),
851 "build_fcn": (TBuilder.Real, TGen.tgComplexComponents, ArgGen.agNone),
852 "types": {
853 "tflite": [tf.complex64],
854 },
855 },
856 "imag": {
857 "operands": (1, 0),
858 "build_fcn": (TBuilder.Imag, TGen.tgComplexComponents, ArgGen.agNone),
859 "types": {
860 "tflite": [tf.complex64],
861 },
862 },
Jeremy Johnson015c3552022-02-23 12:15:03 +0000863}

# Shapes to be tested; default can be overwritten
shape_list = [
    (1,),
    (64,),
    (14, 19),
    (13, 21, 3),
    (1, 8, 16),
    (1, 4, 4, 4),
    (1, 8, 4, 17),
    (1, 4, 8, 19),
    (1, 32, 32, 8),
    (1, 7, 7, 9),
    (3, 1, 1, 7),
    (2, 2, 7, 7, 2),
    (1, 4, 8, 21, 17),
    (3, 32, 16, 16, 5),
]


def gen_rand_shapes(args):
    """Overwrite the global shape list with a new list of random shapes"""
    global shape_list

    rng = np.random.default_rng(args.random_seed)

    # Don't let things get too big... cap the maximum volume, but let
    # an individual dimension be 1..47
    max_total_volume = 32 * 32 * 4

    shape_list = []
    # Only iterate over ranks 2, 3, 4, and 5
    for rank in range(2, 6):
        for n in range(args.random_shapes):
            new_shape = rng.integers(1, 48, size=rank)

            # Set the batch dimension on 4D or 5D objects to 1
            if rank == 4 or rank == 5:
                new_shape[0] = 1

            # Limit the total shape volume and throw out any
            # shapes that wouldn't leave at least size=2 in some non-batch dimension
            volume = 1
            skip_shape = False
            for i in range(rank):

                volume *= new_shape[i]

                # Reduce the shape, while it's larger than the maximum volume
                while volume > max_total_volume:
                    new_shape[i] = new_shape[i] // 2
                    volume = volume // 2

                # Now an untenable dimension size? Skip this one.
                if new_shape[i] < 1:
                    skip_shape = True

            if not skip_shape:
                shape_list.append(tuple(new_shape))

924
925# Construct, run and save a whole tensorflow tf.function to a protobuf file
926# or convert to .tflite if it's quantized unit test
927def run_unit_test(
928 op_name,
929 args,
930 test_dir,
931 curr_shape,
932 addl_args,
933 dtype,
934 excluded_framework_list,
935 quantized_inference_dtype,
936 result_name,
937 seed,
938):
939
    try:
        op = TF_OP_LIST[op_name]
        op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

        # Get and seed a random number generator for this test
        rng = np.random.default_rng(seed)

        # Returns placeholders=(str: name, np.array: value)
        #         consts=(str: name, np.array: value)
        placeholders, consts = tensor_gen_fcn(op, curr_shape, dtype, rng)

        # If the test doesn't have any placeholders/consts, terminate
        if len(placeholders) == 0 and len(consts) == 0:
            return True

        if not args.quiet:
            print(" {} ".format(test_dir))

        try:
            os.mkdir(test_dir)
        except FileExistsError:
            pass

        const_nodes = [value for name, value in consts]

        num_placeholders = len(placeholders)
        # If the test is quantized, create tensor quantization metadata for
        # each input tensor, based on the quantized type
        if quantized_inference_dtype:
            is_quantized = True
            # TODO: support INT8 IFM x INT4 weight later
            if quantized_inference_dtype == QuantType.ALL_U8:
                qzero = [128] * num_placeholders
                numpy_dtype = [np.uint8] * num_placeholders
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.ALL_I8:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int8] * num_placeholders
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.ALL_I16:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int16] * num_placeholders
                tflite_inference_dtype = tf.int16
            elif quantized_inference_dtype == QuantType.CONV_U8_U8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [128] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.uint8, np.uint8]
                else:
                    numpy_dtype = [np.uint8, np.uint8, np.int32]
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.CONV_I8_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [0] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.int8, np.int8]
                else:
                    numpy_dtype = [np.int8, np.int8, np.int32]
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.CONV_I16_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                if num_placeholders == 2:
                    qzero = [0, 0]
                    numpy_dtype = [np.int16, np.int8]
                else:
                    qzero = [0, 0, 0]
                    numpy_dtype = [
                        np.int16,
                        np.int8,
                        np.int64,
                    ]  # np.int64 to represent the 40-bit accumulator
                tflite_inference_dtype = tf.int16
            else:
                raise Exception(
                    "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
                )

        else:
            is_quantized = False

        tf_model_filename = None
        tf_result_npy_filename = None
        tf_result_name = None

        tflite_model_filename = None
        tflite_result_npy_filename = None
        tflite_result_name = None

        placeholder_names = []
        placeholder_vals = []
        placeholder_signatures = ()
        placeholder_npy_filenames = []
        placeholder_shapes = []

        for idx, (name, val) in enumerate(placeholders):
            placeholder_names.append(name)
            placeholder_signatures = placeholder_signatures + (
                tf.TensorSpec(shape=val.shape, dtype=val.dtype, name=name),
            )
            placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
            placeholder_shapes.append(val.shape)

        # Get the test builder instance and trace it into a concrete function
        fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
        concrete_function = tf.function(input_signature=placeholder_signatures)(
            fcn_node.eval
        ).get_concrete_function()

        if is_quantized:

            assert dtype is tf.float32, "quantized test must come from float32 graph"

            # 1. Quantize the float placeholder npy to feed the graph
            for idx, (name, val) in enumerate(placeholders):

                # We use np.amin()/np.amax() to determine the dynamic range
                # for the quantized test
                zeropoint = 0
                scale = 1.0
                if numpy_dtype[idx] != np.int64:
                    qmin = np.iinfo(numpy_dtype[idx]).min
                    qmax = np.iinfo(numpy_dtype[idx]).max
                    num_bits = np.iinfo(numpy_dtype[idx]).bits
                # 40 bits is represented as np.int64
                else:
                    num_bits = 40
                    qmin = -(1 << num_bits)
                    qmax = (1 << num_bits) - 1

                min_val = np.amin(val)
                max_val = np.amax(val)

                # For a single-value tensor, we set scale equal to the abs(value),
                # and fix zeropoint to 128:
                # if val > 0, it'll be represented as 129,
                #    where val = (129 - 128) * val
                # if val < 0, it'll be represented as 127,
                #    where val = (127 - 128) * (-val)
                # if val == 0, it'll be represented as 128, with range [-128.0, 128.0],
                # and let quantized 1 represent the value;
                # also adjust the effective min/max accordingly
                if max_val == min_val:
                    if max_val != 0:
                        scale = abs(max_val)
                    else:
                        scale = 1.0
                    min_val = float(qmin - qzero[idx]) * scale
                    max_val = float(qmax - qzero[idx]) * scale
                else:
                    scale = (max_val - min_val) / float(qmax - qmin)
                    zeropoint = int(round((-min_val) / scale)) + qmin
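                # Worked example (illustrative): for int8 data spanning
                # [-1.0, 1.0], qmin=-128 and qmax=127, so
                # scale = 2.0 / 255 ~= 0.00784 and
                # zeropoint = round(1.0 / 0.00784) + (-128) = 0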

                # Run through tf.fakequant first to ensure the quantization
                # error is aligned
                fakequant_val = tf.quantization.fake_quant_with_min_max_args(
                    val,
                    min=min_val,
                    max=max_val,
                    num_bits=num_bits,
                    name="gen_quant_npy",
                )

                quant_val = np.round(fakequant_val / scale).astype(np.int32) + zeropoint

                # In very few unit tests after the TF hash of May 2020, this
                # quantized value exceeds the [0, 255] range for some reason
                saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])

                # Save all quantized tensors as np.int32,
                # since the TOSA numpy C++ API only supports int32
                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]),
                    saved_val.astype(np.int32),
                    False,
                )

                placeholder_vals.append(tf.convert_to_tensor(saved_val))

            # 2. Convert the model to a quantized TFLite flatbuffer
            module = tf.Module()
            converter = tf.lite.TFLiteConverter.from_concrete_functions(
                [concrete_function], module
            )
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.experimental_new_converter = True

            # Use the MLIR-based post-quantizer
            converter.experimental_new_quantizer = True

            flag = (
                tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8  # noqa: E501
            )
            if tflite_inference_dtype == tf.int16:
                converter.target_spec.supported_ops = [flag]

            def input_stats():
                # Rsqrt can only handle positive numbers
                elem_signedness = ElemSignedness.ALL_RANGE
                if op_name == "rsqrt":
                    elem_signedness = ElemSignedness.POSITIVE

                for i in range(0, args.num_samples):
                    a = [
                        TGen.getRand(shape, tf.float32, rng, elem_signedness)
                        for shape in placeholder_shapes
                    ]
                    yield a

            converter.representative_dataset = input_stats
            converter.inference_input_type = tflite_inference_dtype
            converter.inference_output_type = tflite_inference_dtype
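
            # Note: the converter runs the representative dataset through the
            # float graph to calibrate quantization ranges before emitting a
            # fully quantized flatbuffer with quantized input/output types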

            tflite_model = converter.convert()

            tflite_model_filename = "model.tflite"

            # Write out converted model to disk
            with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                f.write(tflite_model)

        else:  # is_quantized is False

            # 1. Save out the numpy arrays directly
            for idx, (name, val) in enumerate(placeholders):
                placeholder_vals.append(tf.convert_to_tensor(val))

                # Complex tensors are expected to be represented by a
                # single floating point tensor of shape [?, ..., ?, 2].
                if val.dtype == np.complex64:
                    val_shape = val.shape + (2,)
                    val = val.view(np.float32)
                    val = val.reshape(val_shape)

                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
                )

            # 2.a Save out a .pb if the framework list includes tensorflow
            if "tf" not in excluded_framework_list:
                # Write out the graph as a protobuf to disk
                tf_model_filename = "model.pb"
                tf.io.write_graph(
                    concrete_function.graph, test_dir, tf_model_filename, True
                )

            # 2.b Save out a .tflite if the framework list includes tflite
            if "tflite" not in excluded_framework_list:
                # Convert the model to a TFLite flatbuffer
                module = tf.Module()
                converter = tf.lite.TFLiteConverter.from_concrete_functions(
                    [concrete_function], module
                )

                converter.experimental_new_converter = True

                # Even if it's a non-quantized int32 test, this needs to be set
                # to tf.float32
                converter.inference_input_type = tf.float32
                converter.inference_output_type = tf.float32
                tflite_model = converter.convert()

                # Write out converted model to disk
                tflite_model_filename = "model.tflite"
                with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                    f.write(tflite_model)

        # Get TF reference result if .pb is specified
        if tf_model_filename:
            tf_result_npy_filename = "tf_result.npy"
            tf_result = concrete_function(*placeholder_vals)
            np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)

            tf_result_name = result_name

        # Get TFLite inference result if .tflite is specified
        if tflite_model_filename:
            tflite_result_npy_filename = "tflite_result.npy"

            ops_with_optimized_only_kernel = ["elu", "ceil", "gather", "rfft2d"]

            if args.tflite_kernel_mode == "optimized" or (
                op_name in ops_with_optimized_only_kernel
            ):
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename)
                )
            elif args.tflite_kernel_mode == "reference":
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename),
                    experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
                )
            else:
                assert 0, "unknown tflite interpreter mode {}".format(
                    args.tflite_kernel_mode
                )
            interpreter.allocate_tensors()

            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()

            assert len(input_details) == len(
                placeholder_vals
            ), "number of placeholders mismatch"

            for idx, val in enumerate(placeholder_vals):
                interpreter.set_tensor(input_details[idx]["index"], val.numpy())

            interpreter.invoke()
            tflite_result = interpreter.get_tensor(output_details[0]["index"])

            np.save(
                os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
            )

            # The result tensor name changes after converting to a TFLite
            # flatbuffer, so overwrite the information from the TFLite model
            # directly.  Assume a single result tensor for now.
            tflite_result_name = output_details[0]["name"]

        # Write out test descriptor
        write_test_json(
            filename=os.path.join(test_dir, "test.json"),
            tf_model_filename=tf_model_filename,
            tf_result_npy_filename=tf_result_npy_filename,
            tf_result_name=tf_result_name,
            tflite_model_filename=tflite_model_filename,
            tflite_result_npy_filename=tflite_result_npy_filename,
            tflite_result_name=tflite_result_name,
            ifm_name=placeholder_names,
            ifm_file=placeholder_npy_filenames,
            ifm_shape=placeholder_shapes,
            framework_exclusions=excluded_framework_list,
            quantized=is_quantized,
        )
    except Exception as e:
        msg = "Error running task: {}".format(e)
        print(msg)
        print(
            "".join(
                # Positional arguments work on all Python 3 versions;
                # the etype= keyword was removed in Python 3.10
                traceback.format_exception(type(e), e, e.__traceback__)
            )
        )
        return False
    return True


def build_const_net(
    args,
    curr_shape,
    op_name,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
    rng,
    filter,
    unit_test_args,
):

    if quantized_inference_dtype:
        quant_dtype = get_tf_dtype(quantized_inference_dtype)
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
    else:
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
    test_dir = os.path.join(args.output_dir, test_dir)

    # If the operator has an additional function to generate arguments, call it
    # here and iterate through the argument list that it generates
    op = TF_OP_LIST[op_name]
    op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

    try:
        rank_lo, rank_hi = op["rank"]
    except KeyError:
        # Default the testing rank range to (1, 4)
        rank_lo = 1
        rank_hi = 4

    if len(curr_shape) not in range(rank_lo, rank_hi + 1):
        return

    addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
    for desc, addl_args in addl_args_tuple:
        # Only filter on the full test_name, not the output directory
        _, test_name = os.path.split(test_dir + desc)
        if not filter or filter.search(test_name):
            unit_test_args.append(
                [
                    op_name,
                    args,
                    test_dir + desc,
                    curr_shape,
                    addl_args,
                    dtype,
                    excluded_framework_list,
                    quantized_inference_dtype,
                    result_name,
                    seed,
                ]
            )


# Python's built-in hash() is not reproducible across runs,
# so create a simple deterministic hash for our purposes
def op_name_hash(op_name):
    result = 0xDEADBEEF
    for ch in op_name:
        if result & 1:
            result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
        else:
            result = (ord(ch) << 24) ^ (result >> 1)

    return result


def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):

    if not args.quiet:
        print("Generating tests for {}".format(op_name))

    op = TF_OP_LIST[op_name]

    # Seed the RNG so that we get the same random tests for each test each time
    # If the number of tests for a given generation function changes, the tests
    # for that operator may also change accordingly, but this will at least keep
    # down churn across operators.

    bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
        np.int32
    ).max
    rng = np.random.default_rng(bounded_hash_val)
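
    # For example (illustrative): op_name_hash("add") returns the same value
    # in every process, unlike Python's salted built-in hash(), so the "add"
    # tests are generated from the same RNG stream on each run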

    # op["types"] can be a dictionary with 'tf' and 'tflite' as keys,
    # whose values are the data types we want to test under each framework

    if isinstance(op["types"], dict):
        try:
            tf_dtypes = op["types"]["tf"]
        except KeyError:
            tf_dtypes = []
        try:
            tflite_dtypes = op["types"]["tflite"]
        except KeyError:
            tflite_dtypes = []
    elif isinstance(op["types"], list):
        tf_dtypes = op["types"]
        tflite_dtypes = op["types"]

    tf_nonquantized_dtypes = tf_dtypes  # tf doesn't support quantized data types
    tflite_quantized_dtypes = []
    tflite_nonquantized_dtypes = []
    for dtype in tflite_dtypes:
        if isinstance(dtype, QuantType):
            tflite_quantized_dtypes.append(dtype)
        else:
            tflite_nonquantized_dtypes.append(dtype)

    nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
        set(tflite_nonquantized_dtypes)
    )
    nonquantized_dtypes = list(nonquantized_dtypes_set)
    quantized_dtypes = tflite_quantized_dtypes
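
    # Illustrative example: if op["types"]["tf"] is [tf.float32, tf.int32] and
    # op["types"]["tflite"] is [tf.float32, QuantType.ALL_I8], then
    # nonquantized_dtypes covers {tf.float32, tf.int32} and
    # quantized_dtypes is [QuantType.ALL_I8]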

    # Append custom_shapes, or replace shape_list entirely with custom_shapes
    try:
        custom_shapes = op["custom_shapes"]
        if custom_shapes["custom_shape_only"]:
            shape_list = custom_shapes["shape_list"]
        else:
            shape_list = shape_list.copy()
            # Use extend, not append, so each custom shape becomes its own
            # entry instead of nesting the whole list as a single element
            shape_list.extend(custom_shapes["shape_list"])
    except KeyError:
        pass

    # Populate the non-quantized unit test arguments
    for dtype in nonquantized_dtypes:

        excluded_framework_set = set(ALL_FRAMEWORKS)
        if dtype in tf_nonquantized_dtypes:
            excluded_framework_set.remove("tf")
        if dtype in tflite_nonquantized_dtypes:
            excluded_framework_set.remove("tflite")
        excluded_framework_list = list(excluded_framework_set)

        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                dtype,
                excluded_framework_list,
                None,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    # Populate the quantized unit test arguments;
    # 'tf' must be excluded and the source dtype must be tf.float32
    for dtype in quantized_dtypes:
        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                tf.float32,
                ["tf"],
                dtype,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    return unit_test_args


def createDynamicOpLists():
    """The templated operators are conv2d-style operators with a number of kernel
    sizes.  Since the operator is unchanged, we generate the range of kernel
    sizes here in this loop and remove the original templates from the list.

    This could be expanded to non-conv2d-style operators in the future."""

    # Dynamically create op lists for convolutions with a list of kernel sizes
    KERNELS = [
        [1, 1],
        [3, 3],
        [5, 5],
    ]
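    # For example, "conv2d_TEMPLATE" expands below into "conv2d_1x1",
    # "conv2d_3x3" and "conv2d_5x5" entries (see the name format strings)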

    # dim = [D, H, W]
    KERNELS_3D = [
        [1, 1, 1],
        [2, 3, 3],
        [3, 5, 5],
    ]

    TEMPLATE_LIST = [
        "conv2d",
        "conv2d_bias",
        "conv2d_relu",
        "conv2d_relu6",
        "conv2d_relu_n1_to_1",
        "conv2d_tanh",
        "depthwise_conv2d",
        "depthwise_conv2d_bias",
        "transpose_conv2d",
    ]

    TEMPLATE_LIST_CONV3D = [
        "conv3d",
        "conv3d_bias",
    ]

    for t in TEMPLATE_LIST:
        for k in KERNELS:
            testName = "{}_{}x{}".format(t, k[0], k[1])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # The 2D templates above don't support kernels with more than two spatial
    # dimensions, so the conv3d templates are expanded separately.
    for t in TEMPLATE_LIST_CONV3D:
        for k in KERNELS_3D:
            testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # Delete any templates after having created any dynamic ops
    # This is a two-pass operation because it's bad practice to delete
    # keys from dictionaries while iterating
    keyList = []
    for k in TF_OP_LIST:
        try:
            if TF_OP_LIST[k]["template"]:
                keyList.append(k)
                continue
        except KeyError:
            pass

    for k in keyList:
        del TF_OP_LIST[k]


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--seed", dest="random_seed", default=42, type=int, help="Random seed"
    )
    parser.add_argument(
        "--random-shapes",
        dest="random_shapes",
        default=0,
        type=int,
        help=(
            "Use N random shapes of each rank for generating tests, "
            "seeded with the random seed"
        ),
    )
    parser.add_argument(
        "-o",
        "--output-dir",
        dest="output_dir",
        default=".",
        type=str,
        help="Test output directory path prefix",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        dest="quiet",
        default=False,
        action="store_true",
        help="Do not print test names",
    )
    parser.add_argument(
        "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
    )
    parser.add_argument(
        "-m",
        "--tflite-kernel-mode",
        dest="tflite_kernel_mode",
        type=str,
        choices=["reference", "optimized"],
        default="reference",
        help="TFLite interpreter kernel mode",
    )
    parser.add_argument(
        "--num-samples",
        dest="num_samples",
        default=200,
        type=int,
        help="Number of input samples for post-training quantization",
    )
    parser.add_argument(
        "--filter",
        dest="filter",
        default="",
        type=str,
        help="Filter test names by this expression",
    )
    args = parser.parse_args()

    # Turn the filter into a re object if present
    filter = None
    if args.filter != "":
        filter = re.compile(args.filter)

    # Autodetect CPU count
    if args.jobs <= 0:
        args.jobs = os.cpu_count()

    # Disable TF info messages
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    try:
        os.makedirs(args.output_dir)
    except FileExistsError:
        pass

    if args.random_shapes:
        gen_rand_shapes(args)

    # Build dynamic ops
    createDynamicOpLists()

    # Generate the test list and arguments to run_unit_test()
    unit_test_args = []

    for op in TF_OP_LIST:
        generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)

    errors = 0
    for t in unit_test_args:
        if not run_unit_test(*t):
            errors = errors + 1

    if not args.quiet:
        print("\nAll tasks done - with {} errors".format(errors))

    return 1 if errors else 0


if __name__ == "__main__":
    exit(main())