#!/usr/bin/env python3
# Copyright (c) 2020-2023, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import re
import traceback

import numpy as np

# Level | Level for Humans | Level Description
# ------|------------------|------------------------------------
#  0    | DEBUG            | [Default] Print all messages
#  1    | INFO             | Filter out INFO messages
#  2    | WARNING          | Filter out INFO & WARNING messages
#  3    | ERROR            | Filter out all messages
# Filter out TensorFlow debug messages, except errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Flake8 E402 - ignore imports not at top of file to allow os.environ setting
import tensorflow as tf  # noqa: E402
from frameworks.write_test_json import write_test_json  # noqa: E402
from frameworks.arg_gen import ArgGen  # noqa: E402
from frameworks.tensor_gen import TGen  # noqa: E402
from frameworks.tensor_gen import ElemSignedness  # noqa: E402
from frameworks.test_builder import TBuilder  # noqa: E402
from frameworks.test_gen_utils import (  # noqa: E402
    QuantType,
    get_tf_dtype,
    get_shape_str,
)  # noqa: E402
from tensorflow.lite.python.interpreter import OpResolverType  # noqa: E402

# All of the supported frameworks
ALL_FRAMEWORKS = ["tf", "tflite"]

# Lists of different data types
TYPE_F = [tf.float32]
TYPE_I = [tf.int32]
TYPE_FI = [tf.float32, tf.int32]
TYPE_B = [tf.bool]
TYPE_FIB = [tf.float32, tf.int32, tf.bool]
TYPE_H = [tf.float16]
TYPE_FH = [tf.float32, tf.float16]
TYPE_FHI = [tf.float32, tf.float16, tf.int32]
TYPE_FHIB = [tf.float32, tf.float16, tf.int32, tf.bool]

# The list of operator tests
# Each dictionary entry for an op is a dictionary with the following required members:
#   'operands':  tuple (number_of_placeholder_tensors, number_of_constant_tensors)
#   'build_fcn': tuple (Test builder function, Tensor generator function,
#                       Argument generator function)
#   'types':     list of TensorFlow types that should be tested for this op
#                OR
#                a dictionary of {'framework_name': [type_list]} for cases where only
#                a subset of the types should be tested in each framework. This can
#                also be used to restrict an operator to a particular framework.
#
# And optional members:
#   'template': boolean (indicates that this is a templated op which gets further
#               processing in createDynamicOpLists)
#   'bias':     boolean indicating that there is a bias component to be generated
#   'qtypes':   list of QuantType quantized types to generate for this op
#   'rank':     tuple (lowest rank, highest rank). Dimension range of input tensor.
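#
# For example, the "add" entry below uses two placeholder inputs and no
# constants, builds the graph via TBuilder.Add with fuzzed binary-operand
# tensors (TGen.tgBFuzz) and no extra arguments (ArgGen.agNone), and is tested
# with float32/int32 under "tf" plus the quantized QuantType variants under
# "tflite".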

TF_OP_LIST = {
    "add": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Add, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "sub": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Sub, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # QuantType.ALL_I16 fails in TFLite conversion
        },
    },
    "mul": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Mul, TGen.tgBFuzz, ArgGen.agNone),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(
                TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "exp": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Exp, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "rcp": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rcp, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "relu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "relu1": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu1, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": [],
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "relu0To1": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu0To1, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": [],
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "relu6": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Relu6, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "leaky_relu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LeakyRelu, TGen.tgBasic, ArgGen.agFloat),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "prelu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Prelu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "gelu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Gelu, TGen.tgBasic, ArgGen.agNone),
        "types": {
            # Need compiler support for tf.Erf.
            # "tf": TYPE_F,
            "tflite": list(
                # Only float32, int8 and uint8 supported currently
                TYPE_F
                + [QuantType.ALL_U8, QuantType.ALL_I8]
            ),
        },
    },
    "concat": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Concat, TGen.tgBasic, ArgGen.agAxes),
        "types": TYPE_FI,
    },
    "bitwise_and": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseAnd, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_or": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseOr, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_not": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.BitwiseNot, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "bitwise_xor": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.BitwiseXor, TGen.tgBFuzz, ArgGen.agNone),
        "types": {"tf": TYPE_I},  # Not supported in TF Lite
    },
    "logical_and": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LogicalAnd, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "logical_or": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LogicalOr, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "logical_not": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LogicalNot, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_B,
    },
    "reduce_any": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceAny, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_B,
    },
    "reduce_all": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceAll, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_B,
    },
    "reduce_min": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMin, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "reduce_max": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMax, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_FI,
            "tflite": list(TYPE_FI + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "reduce_sum": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceSum, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_F,
            # v2 converter doesn't recognize quantized reduce_sum
            # "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
            "tflite": TYPE_F,
        },
    },
    "reduce_mean": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceMean, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "reduce_product": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ReduceProduct, TGen.tgBasic, ArgGen.agAxesListKeepdims),
        "types": TYPE_F,
    },
    "min": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Min, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "max": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Max, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "pow": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Pow, TGen.tgBFuzz, ArgGen.agNone),
        # Technically, integer is supported, but only for positive exponents.
        # Needs a random argument generator.
        "types": TYPE_F,
    },
    "abs": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Abs, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "ceil": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Ceil, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "floor": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Floor, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "log": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Log, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "negate": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Negate, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "rsqrt": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rsqrt, TGen.tgBasicPositive, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(TYPE_F + [QuantType.ALL_I8]),
        },
    },
    "sign": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sign, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
        },
    },
    "sigmoid": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sigmoid, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "tanh": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Tanh, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "sin": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Sin, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "cos": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Cos, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "atan2": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Atan2, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tflite": TYPE_F,
        },
    },
    "square": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Square, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "squared_difference": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.SquaredDifference, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_F,
    },
    "equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Equal, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "greater_equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.GreaterEqual, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "greater": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Greater, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "less": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Less, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "less_equal": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.LessEqual, TGen.tgBFuzz, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "conv2d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (TBuilder.Conv2d, TGen.tgConv2d, ArgGen.agConv2d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dRelu, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu6_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dRelu6, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_relu_n1_to_1_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dReluN1To1, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    # This test is converted as:
    # tfl.conv2d(){fused_activation_function="NONE"} + tfl.tanh()
    # TODO: any way to generate tfl.conv2d(){fused_activation_function="TANH"}?
    "conv2d_tanh_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dTanh, TGen.tgConv2d, ArgGen.agNone),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "conv2d_bias_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv2dWithBias, TGen.tgConv2d, ArgGen.agConv2d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "bias": True,
        "template": True,
    },
    "conv3d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (TBuilder.Conv3d, TGen.tgConv3d, ArgGen.agConv3d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                # Quantization to 16x8-bit not yet supported by tflite.
            ],
        },
        "template": True,
        "rank": (1, 5),
    },
    "conv3d_bias_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (TBuilder.Conv3dWithBias, TGen.tgConv3d, ArgGen.agConv3d),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                # Quantization to 16x8-bit not yet supported by tflite.
            ],
        },
        "bias": True,
        "template": True,
        "rank": (1, 5),
    },
    "depthwise_conv2d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (
            TBuilder.DepthwiseConv2d,
            TGen.tgDepthwiseConv2d,
            ArgGen.agDepthwiseConv2d,
        ),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "depthwise_conv2d_bias_TEMPLATE": {
        "operands": (1, 2),
        "build_fcn": (
            TBuilder.DepthwiseConv2dWithBias,
            TGen.tgDepthwiseConv2d,
            ArgGen.agDepthwiseConv2d,
        ),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "bias": True,
        "template": True,
    },
    "transpose_conv2d_TEMPLATE": {
        "operands": (1, 1),
        "build_fcn": (
            TBuilder.TransposeConv2d,
            TGen.tgTransposeConv2d,
            ArgGen.agTransposeConv2d,
        ),
        "types": {
            "tf": [tf.float32],
            "tflite": [
                tf.float32,
                QuantType.CONV_U8_U8,
                QuantType.CONV_I8_I8,
                QuantType.CONV_I16_I8,
            ],
        },
        "template": True,
    },
    "argmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Argmax, TGen.tgBasic, ArgGen.agAxes),
        "types": {"tf": TYPE_F},
    },
    "avg_pool2d": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.AvgPool2d, TGen.tgPooling, ArgGen.agPooling),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "max_pool2d": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.MaxPool2d, TGen.tgPooling, ArgGen.agPooling),
        "types": {
            "tf": TYPE_F,
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
            # ALL_I16 not supported yet
            # In tensorflow/compiler/mlir/lite/ir/tfl_ops.td,
            # QI16 is missing from MaxPoolOperandAndResultConstraints
            # If adding QI16 back this test can run through.
        },
    },
    "reshape": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Reshape, TGen.tgBasic, ArgGen.agReshape),
        "types": TYPE_FI,
    },
    "transpose": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Transpose, TGen.tgBasic, ArgGen.agTranspose),
        "types": TYPE_FI,
    },
    "slice": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Slice, TGen.tgBasic, ArgGen.agSlice),
        "types": TYPE_FI,
    },
    "strided_slice": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.StridedSlice, TGen.tgBasic, ArgGen.agStridedSlice),
        "types": TYPE_FI,
    },
    "select": {
        "operands": (3, 0),
        "build_fcn": (TBuilder.Select, TGen.tgSelect, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "addn": {
        "operands": (4, 0),
        "build_fcn": (TBuilder.Addn, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "concatv2": {
        "operands": (4, 0),
        "build_fcn": (TBuilder.Concatv2, TGen.tgBasic, ArgGen.agAxes),
        "types": TYPE_FI,
    },
    "stack": {
        "operands": (4, 0),
        "build_fcn": (TBuilder.Stack, TGen.tgBasic, ArgGen.agStack),
        "types": TYPE_FI,
    },
    "unstack": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Unstack, TGen.tgPooling, ArgGen.agAxes),
        "types": TYPE_F,
    },
    "mirrorpad": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.MirrorPad, TGen.tgBasic, ArgGen.agMirrorPad),
        "types": TYPE_FI,
    },
    "pad": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Pad, TGen.tgBasic, ArgGen.agPad),
        "types": {
            "tf": TYPE_F,
            "tflite": list(TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8]),
        },
    },
    "expand_dims": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ExpandDims, TGen.tgBasic, ArgGen.agStack),
        "types": TYPE_FI,
    },
    "shape": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Shape, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "rank": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Rank, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_FI,
    },
    "fill": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Fill, TGen.tgBasic, ArgGen.agFill),
        "types": TYPE_FI,
    },
    "elu": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Elu, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "softmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Softmax, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "log_softmax": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LogSoftmax, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "matmul": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.MatMul, TGen.tgMatmul, ArgGen.agNone),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F
                + [QuantType.ALL_U8, QuantType.ALL_I8]
                # 16-bit matmul fails to convert
            ),
        },
    },
    "add_scalar": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.AddScalar, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "add_1d": {
        "operands": (2, 0),
        "build_fcn": (TBuilder.Add1d, TGen.tgBasic, ArgGen.agNone),
        "types": TYPE_F,
    },
    "split": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Split, TGen.tgBasic, ArgGen.agSplit),
        "types": TYPE_FI,
    },
    "tile": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Tile, TGen.tgBasic, ArgGen.agTile),
        "types": TYPE_FI,
    },
    "reverse": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Reverse, TGen.tgBasic, ArgGen.agAxes),
        "types": {"tf": TYPE_FI},
    },
    "gather": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Gather, TGen.tgBasic, ArgGen.agGather),
        "types": TYPE_FI,
    },
    "gather_nd": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.GatherNd, TGen.tgBasic, ArgGen.agGatherND),
        "types": TYPE_FI,
    },
    "scatter_nd": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.ScatterNd, TGen.tgBasic, ArgGen.agScatterND),
        "types": TYPE_FI,
    },
    "space_to_batch": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.SpaceToBatch, TGen.tgBasic, ArgGen.agSpaceToBatch),
        "types": TYPE_F,
    },
    "batch_to_space": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.BatchToSpace, TGen.tgBasic, ArgGen.agBatchToSpace),
        "types": TYPE_F,
    },
    "space_to_depth": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.SpaceToDepth, TGen.tgBasic, ArgGen.agSpaceToDepth),
        "types": TYPE_F,
    },
    "depth_to_space": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.DepthToSpace, TGen.tgBasic, ArgGen.agDepthToSpace),
        "types": TYPE_F,
    },
    "one_hot": {
        "operands": (3, 1),
        "build_fcn": (TBuilder.OneHot, TGen.tgOneHot, ArgGen.agOneHot),
        "types": TYPE_FI,
    },
    "fakequant": {
        "operands": (1, 0),
        "build_fcn": (
            TBuilder.Fakequant,
            TGen.tgBasic,
            ArgGen.agFakequant,
        ),
        "types": {"tf": TYPE_F},
    },
    "resize": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Resize, TGen.tgPooling, ArgGen.agResize),
        "types": {
            "tf": TYPE_F,
            "tflite": list(
                TYPE_F + [QuantType.ALL_U8, QuantType.ALL_I8, QuantType.ALL_I16]
            ),
        },
    },
    "left_shift": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LeftShift, TGen.tgBasic, ArgGen.agShift),
        "types": {"tf": [tf.int32]},
    },
    "right_shift": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.RightShift, TGen.tgBasic, ArgGen.agShift),
        "types": {
            "tf": [
                tf.int32,
            ]
        },
    },
    "while": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.While, TGen.tgBasic, ArgGen.agNone),
        "types": {
            "tflite": list(TYPE_F),
        },
    },
    "lstm": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.LSTM, TGen.tgRecurrent, ArgGen.agNone),
        "types": {
            "tflite": [
                tf.float32,
                # tf.int32
            ]
        },
    },
    "gru": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.GRU, TGen.tgRecurrent, ArgGen.agNone),
        "types": {
            "tflite": [
                tf.float32,
                # tf.int32
            ]
        },
    },
    "rnn": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.RNN, TGen.tgRecurrent, ArgGen.agNone),
        "types": {
            "tflite": [
                tf.float32,
            ]
        },
    },
    "rfft2d": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.RFFT2d, TGen.tgRFFT2d, ArgGen.agRFFT2d),
        "types": {
            "tflite": TYPE_F,
        },
    },
    "real": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Real, TGen.tgComplexComponents, ArgGen.agNone),
        "types": {
            "tflite": [tf.complex64],
        },
    },
    "imag": {
        "operands": (1, 0),
        "build_fcn": (TBuilder.Imag, TGen.tgComplexComponents, ArgGen.agNone),
        "types": {
            "tflite": [tf.complex64],
        },
    },
}

# Shapes to be tested; default can be overwritten
shape_list = [
    (1,),
    (64,),
    (14, 19),
    (13, 21, 3),
    (1, 8, 16),
    (1, 4, 4, 4),
    (1, 8, 4, 17),
    (1, 4, 8, 19),
    (1, 32, 32, 8),
    (1, 7, 7, 9),
    (3, 1, 1, 7),
    (2, 2, 7, 7, 2),
    (1, 4, 8, 21, 17),
    (3, 32, 16, 16, 5),
]


def gen_rand_shapes(args):
    """Overwrite the global shape list with a new list of random shapes"""
    global shape_list

    rng = np.random.default_rng(args.random_seed)

    # Don't let things get too big... cap the maximum volume, but let
    # an individual dimension be 1..47
    max_total_volume = 32 * 32 * 4
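    # i.e. at most 4096 elements per shape; e.g. a random candidate like
    # (40, 33, 21) (volume 27720) is halved dimension-by-dimension by the loop
    # below until it fits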

    shape_list = []
    # Only iterate over ranks 2, 3, 4, and 5
    for rank in range(2, 6):
        for n in range(args.random_shapes):
            new_shape = rng.integers(1, 48, size=rank)

            # Set the batch dimension on 4D or 5D objects to 1
            if rank == 4 or rank == 5:
                new_shape[0] = 1

            # Limit the total shape volume and throw out any
            # shapes that wouldn't leave at least size=2 in some non-batch dimension
            volume = 1
            skip_shape = False
            for i in range(rank):

                volume *= new_shape[i]

                # Reduce the shape, while it's larger than the maximum volume
                while volume > max_total_volume:
                    new_shape[i] = new_shape[i] // 2
                    volume = volume // 2

                    # Now an untenable dimension size? Skip this one.
                    if new_shape[i] < 1:
                        skip_shape = True

            if not skip_shape:
                shape_list.append(tuple(new_shape))


# Construct, run and save a whole tensorflow tf.function to a protobuf file
# or convert it to .tflite if it's a quantized unit test
def run_unit_test(
    op_name,
    args,
    test_dir,
    curr_shape,
    addl_args,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
):

    try:
        op = TF_OP_LIST[op_name]
        op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

        # Get and seed a random number generator for this test
        rng = np.random.default_rng(seed)

        # return placeholders=(str: name, np.array: value)
        #        consts=(str: name, np.array: value)
        placeholders, consts = tensor_gen_fcn(op, curr_shape, dtype, rng)

        # if the test doesn't have any placeholders/consts, terminate
        if len(placeholders) == 0 and len(consts) == 0:
            return True

        if not args.quiet:
            print(" {} ".format(test_dir))

        try:
            os.mkdir(test_dir)
        except FileExistsError:
            pass

        const_nodes = [value for name, value in consts]

        num_placeholders = len(placeholders)
        # if the test is quantized, create tensor quantization metadata for
        # each input tensor, based on the different quantized types
        if quantized_inference_dtype:
            is_quantized = True
            # TODO: support INT8 IFM x INT4 weight later
            if quantized_inference_dtype == QuantType.ALL_U8:
                qzero = [128] * num_placeholders
                numpy_dtype = [np.uint8] * num_placeholders
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.ALL_I8:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int8] * num_placeholders
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.ALL_I16:
                qzero = [0] * num_placeholders
                numpy_dtype = [np.int16] * num_placeholders
                tflite_inference_dtype = tf.int16
            elif quantized_inference_dtype == QuantType.CONV_U8_U8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [128] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.uint8, np.uint8]
                else:
                    numpy_dtype = [np.uint8, np.uint8, np.int32]
                tflite_inference_dtype = tf.uint8
            elif quantized_inference_dtype == QuantType.CONV_I8_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                qzero = [0] * num_placeholders
                if num_placeholders == 2:
                    numpy_dtype = [np.int8, np.int8]
                else:
                    numpy_dtype = [np.int8, np.int8, np.int32]
                tflite_inference_dtype = tf.int8
            elif quantized_inference_dtype == QuantType.CONV_I16_I8:
                assert (
                    num_placeholders == 1
                ), "Unsupported number of placeholders for Convolution: {}".format(
                    num_placeholders
                )
                if num_placeholders == 2:
                    qzero = [0, 0]
                    numpy_dtype = [np.int16, np.int8]
                else:
                    qzero = [0, 0, 0]
                    numpy_dtype = [
                        np.int16,
                        np.int8,
                        np.int64,
                    ]  # np.int64 to represent 40 bits accumulator
                tflite_inference_dtype = tf.int16
            else:
                raise Exception(
                    "Unsupported fakequant dtype: {}".format(quantized_inference_dtype)
                )

        else:
            is_quantized = False

        tf_model_filename = None
        tf_result_npy_filename = None
        tf_result_name = None

        tflite_model_filename = None
        tflite_result_npy_filename = None
        tflite_result_name = None

        placeholder_names = []
        placeholder_vals = []
        placeholder_signatures = ()
        placeholder_npy_filenames = []
        placeholder_shapes = []

        for idx, (name, val) in enumerate(placeholders):
            placeholder_names.append(name)
            placeholder_signatures = placeholder_signatures + (
                tf.TensorSpec(shape=val.shape, dtype=val.dtype, name=name),
            )
            placeholder_npy_filenames.append("{}.npy".format(name.split(":")[0]))
            placeholder_shapes.append(val.shape)
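            # e.g. a placeholder named "input:0" (illustrative name) would be
            # saved to "input.npy"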

        # Get test builder class
        fcn_node = op_fcn(*const_nodes, *addl_args, result_name)
        concrete_function = tf.function(input_signature=placeholder_signatures)(
            fcn_node.eval
        ).get_concrete_function()

        if is_quantized:

            assert dtype is tf.float32, "quantized test must come from float32 graph"

            # 1. Quantize the float placeholder npy to feed the graph
            for idx, (name, val) in enumerate(placeholders):

                # we use np.amin()/np.amax() to determine the dynamic range
                # for the quantized test
                zeropoint = 0
                scale = 1.0
                if numpy_dtype[idx] != np.int64:
                    qmin = np.iinfo(numpy_dtype[idx]).min
                    qmax = np.iinfo(numpy_dtype[idx]).max
                    num_bits = np.iinfo(numpy_dtype[idx]).bits
                # 40 bit is represented as np.int64
                else:
                    num_bits = 40
                    qmin = -(1 << num_bits)
                    qmax = (1 << num_bits) - 1

                min_val = np.amin(val)
                max_val = np.amax(val)

                # for a single-value tensor, we set scale equal to abs(value),
                # and fix zeropoint to 128
                # if val > 0, it'll be represented as 129,
                #   where val = (129 - 128) * val
                # if val < 0, it'll be represented as 127,
                #   where val = (127 - 128) * (-val)
                # if val == 0, it'll be represented as 128, with range [-128.0, 128.0]
                # and let quantized 1 represent the value
                # also adjust the effective min/max accordingly
                if max_val == min_val:
                    if max_val != 0:
                        scale = abs(max_val)
                    else:
                        scale = 1.0
                    min_val = float(qmin - qzero[idx]) * scale
                    max_val = float(qmax - qzero[idx]) * scale
                else:
                    scale = (max_val - min_val) / float(qmax - qmin)
                    zeropoint = int(round((-min_val) / scale)) + qmin
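                # e.g. for an int8 input with min_val=-1.0 and max_val=3.0:
                # scale = 4.0 / 255 ~= 0.0157,
                # zeropoint = round(1.0 / 0.0157) + (-128) = -64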

                # run through tf.fakequant first to ensure the quantization error is aligned
                fakequant_val = tf.quantization.fake_quant_with_min_max_args(
                    val,
                    min=min_val,
                    max=max_val,
                    num_bits=num_bits,
                    name="gen_quant_npy",
                )

                quant_val = np.round(fakequant_val / scale).astype(np.int32) + zeropoint

                # in a few unit tests after the May 2020 TF hash, this quantized
                # value for some reason exceeds the [0, 255] range
                saved_val = np.clip(quant_val, qmin, qmax).astype(numpy_dtype[idx])

                # save all quantized tensors as np.int32,
                # since the TOSA numpy C++ API only supports int32
                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]),
                    saved_val.astype(np.int32),
                    False,
                )

                placeholder_vals.append(tf.convert_to_tensor(saved_val))

            # 2. Convert the model to quantized TFLite flatbuffer
            module = tf.Module()
            converter = tf.lite.TFLiteConverter.from_concrete_functions(
                [concrete_function], module
            )
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.experimental_new_converter = True

            # use MLIR-based post-quantizer
            converter.experimental_new_quantizer = True

            flag = (
                tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8  # noqa: E501
            )
            if tflite_inference_dtype == tf.int16:
                converter.target_spec.supported_ops = [flag]

            def input_stats():
                # Rsqrt can only handle positive numbers
                elem_signedness = ElemSignedness.ALL_RANGE
                if op_name == "rsqrt":
                    elem_signedness = ElemSignedness.POSITIVE

                for i in range(0, args.num_samples):
                    a = [
                        TGen.getRand(shape, tf.float32, rng, elem_signedness)
                        for shape in placeholder_shapes
                    ]
                    yield a

            converter.representative_dataset = input_stats
            converter.inference_input_type = tflite_inference_dtype
            converter.inference_output_type = tflite_inference_dtype

            tflite_model = converter.convert()

            tflite_model_filename = "model.tflite"

            # Write out converted model to disk
            with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                f.write(tflite_model)

        else:  # is_quantized is False

            # 1. Save out the numpy arrays directly
            for idx, (name, val) in enumerate(placeholders):
                placeholder_vals.append(tf.convert_to_tensor(val))

                # Complex tensors are expected to be represented by a
                # single floating point tensor of shape [?, ..., ?, 2].
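                # e.g. a complex64 tensor of shape (1, 8, 16) is written out as
                # a float32 tensor of shape (1, 8, 16, 2)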
                if val.dtype == np.complex64:
                    val_shape = val.shape + (2,)
                    val = val.view(np.float32)
                    val = val.reshape(val_shape)

                np.save(
                    os.path.join(test_dir, placeholder_npy_filenames[idx]), val, False
                )

            # 2.a Save out .pb if the framework list includes tensorflow
            if "tf" not in excluded_framework_list:
                # Write out graph as protobuf to disk
                tf_model_filename = "model.pb"
                tf.io.write_graph(
                    concrete_function.graph, test_dir, tf_model_filename, True
                )

            # 2.b Save out .tflite if the framework list includes tflite
            if "tflite" not in excluded_framework_list:
                # Convert the model to TFLite flatbuffer
                module = tf.Module()
                converter = tf.lite.TFLiteConverter.from_concrete_functions(
                    [concrete_function], module
                )

                converter.experimental_new_converter = True

                # Even if it's a non-quantized int32 test, this needs to be set to tf.float32
                converter.inference_input_type = tf.float32
                converter.inference_output_type = tf.float32
                tflite_model = converter.convert()

                # Write out converted model to disk
                tflite_model_filename = "model.tflite"
                with open(os.path.join(test_dir, tflite_model_filename), "wb") as f:
                    f.write(tflite_model)

        # Get TF reference result if .pb is specified
        if tf_model_filename:
            tf_result_npy_filename = "tf_result.npy"
            tf_result = concrete_function(*placeholder_vals)
            np.save(os.path.join(test_dir, tf_result_npy_filename), tf_result, False)

            tf_result_name = result_name

        # Get TFLite inference result if .tflite is specified
        if tflite_model_filename:
            tflite_result_npy_filename = "tflite_result.npy"

            ops_with_optimized_only_kernel = ["elu", "ceil", "gather", "rfft2d"]

            if args.tflite_kernel_mode == "optimized" or (
                op_name in ops_with_optimized_only_kernel
            ):
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename)
                )
            elif args.tflite_kernel_mode == "reference":
                interpreter = tf.lite.Interpreter(
                    model_path=os.path.join(test_dir, tflite_model_filename),
                    experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
                )
            else:
                assert 0, "unknown tflite interpreter mode {}".format(
                    args.tflite_kernel_mode
                )
            interpreter.allocate_tensors()

            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()

            assert len(input_details) == len(
                placeholder_vals
            ), "number of placeholders mismatch"

            for idx, val in enumerate(placeholder_vals):
                interpreter.set_tensor(input_details[idx]["index"], val.numpy())

            interpreter.invoke()
            tflite_result = interpreter.get_tensor(output_details[0]["index"])

            np.save(
                os.path.join(test_dir, tflite_result_npy_filename), tflite_result, False
            )

            # The result tensor name changes after converting to TFLite flatbuffer,
            # so overwrite the information from the TFLite model directly.
            # Assume a single result tensor for now.
            tflite_result_name = output_details[0]["name"]

        # Write out test descriptor
        write_test_json(
            filename=os.path.join(test_dir, "test.json"),
            tf_model_filename=tf_model_filename,
            tf_result_npy_filename=tf_result_npy_filename,
            tf_result_name=tf_result_name,
            tflite_model_filename=tflite_model_filename,
            tflite_result_npy_filename=tflite_result_npy_filename,
            tflite_result_name=tflite_result_name,
            ifm_name=placeholder_names,
            ifm_file=placeholder_npy_filenames,
            ifm_shape=placeholder_shapes,
            framework_exclusions=excluded_framework_list,
            quantized=is_quantized,
        )
    except Exception as e:
        msg = "Error running task: {}".format(e)
        print(msg)
        print(
            "".join(
                traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
            )
        )
        return False
    return True


def build_const_net(
    args,
    curr_shape,
    op_name,
    dtype,
    excluded_framework_list,
    quantized_inference_dtype,
    result_name,
    seed,
    rng,
    filter,
    unit_test_args,
):

    if quantized_inference_dtype:
        quant_dtype = get_tf_dtype(quantized_inference_dtype)
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, quant_dtype))
    else:
        test_dir = "test_{}_{}".format(op_name, get_shape_str(curr_shape, dtype))
    test_dir = os.path.join(args.output_dir, test_dir)

    # If the operator has an additional function to generate arguments, call it
    # here and iterate through the argument list that it generates
    op = TF_OP_LIST[op_name]
    op_fcn, tensor_gen_fcn, arg_gen_fcn = op["build_fcn"]

    try:
        rank_lo, rank_hi = op["rank"]
    except KeyError:
        # Set testing rank to (1, 4) by default.
        rank_lo = 1
        rank_hi = 4

    if len(curr_shape) not in range(rank_lo, rank_hi + 1):
        return
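    # e.g. the conv3d ops declare "rank": (1, 5), so 5D shapes such as
    # (2, 2, 7, 7, 2) are kept for them, while ops using the default (1, 4)
    # skip rank-5 shapes here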

    addl_args_tuple = arg_gen_fcn(op, curr_shape, rng)
    for desc, addl_args in addl_args_tuple:
        # Only filter on the full test_name, not the output directory
        _, test_name = os.path.split(test_dir + desc)
        if not filter or filter.search(test_name):
            unit_test_args.append(
                [
                    op_name,
                    args,
                    test_dir + desc,
                    curr_shape,
                    addl_args,
                    dtype,
                    excluded_framework_list,
                    quantized_inference_dtype,
                    result_name,
                    seed,
                ]
            )


# Python's built-in hash is not reproducible across runs; create a hash for our purposes
def op_name_hash(op_name):
    result = 0xDEADBEEF
    for ch in op_name:
        if result & 1:
            result = (ord(ch) << 24) ^ (result >> 1) ^ 0x82608EDB
        else:
            result = (ord(ch) << 24) ^ (result >> 1)

    return result


def generate_op_tests(args, op_name, shape_list, result_name, filter, unit_test_args):

    if not args.quiet:
        print(
            "Generating tests for {} ".format(
                op_name
            )
        )

    op = TF_OP_LIST[op_name]

    # Seed the RNG so that we get the same random tests for each test each time
    # If the number of tests for a given generation function changes, the tests
    # for that operator may also change accordingly, but this will at least keep
    # down churn across operators.

    bounded_hash_val = (args.random_seed + op_name_hash(op_name)) % np.iinfo(
        np.int32
    ).max
    rng = np.random.default_rng(bounded_hash_val)

    # this is a dictionary with 'tf' and 'tflite' as keys
    # and values being the data types we want to test under these frameworks

    if isinstance(op["types"], dict):
        try:
            tf_dtypes = op["types"]["tf"]
        except KeyError:
            tf_dtypes = []
        try:
            tflite_dtypes = op["types"]["tflite"]
        except KeyError:
            tflite_dtypes = []
    elif isinstance(op["types"], list):
        tf_dtypes = op["types"]
        tflite_dtypes = op["types"]

    tf_nonquantized_dtypes = tf_dtypes  # tf doesn't support quantized data types
    tflite_quantized_dtypes = []
    tflite_nonquantized_dtypes = []
    for dtype in tflite_dtypes:
        if isinstance(dtype, QuantType):
            tflite_quantized_dtypes.append(dtype)
        else:
            tflite_nonquantized_dtypes.append(dtype)

    nonquantized_dtypes_set = set(tf_nonquantized_dtypes).union(
        set(tflite_nonquantized_dtypes)
    )
    nonquantized_dtypes = list(nonquantized_dtypes_set)
    quantized_dtypes = tflite_quantized_dtypes

    # populate non-quantized unit test arguments
    for dtype in nonquantized_dtypes:

        excluded_framework_set = set(ALL_FRAMEWORKS)
        if dtype in tf_nonquantized_dtypes:
            excluded_framework_set.remove("tf")
        if dtype in tflite_nonquantized_dtypes:
            excluded_framework_set.remove("tflite")
        excluded_framework_list = list(excluded_framework_set)

        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                dtype,
                excluded_framework_list,
                None,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    # populate quantized unit test arguments
    # must exclude 'tf', and the source dtype is tf.float32
    for dtype in quantized_dtypes:
        for curr_shape in shape_list:
            build_const_net(
                args,
                curr_shape,
                op_name,
                tf.float32,
                ["tf"],
                dtype,
                result_name,
                bounded_hash_val,
                rng,
                filter,
                unit_test_args,
            )

    return unit_test_args


def createDynamicOpLists():
    """The templated operators are conv2d-style operators with a number of kernel
    sizes. Since the operator is unchanged, we generate the range of kernel
    sizes here in this loop and remove the original templates from the list.

    This could be expanded to non-conv2d-style operators in the future."""
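
    # For example, "conv2d_TEMPLATE" expands into "conv2d_1x1", "conv2d_3x3"
    # and "conv2d_5x5" entries below, after which the template entry itself is
    # deleted.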

    # Dynamically create op lists for convolutions with a list of kernel sizes
    KERNELS = [
        [1, 1],
        [3, 3],
        [5, 5],
    ]

    # dim = [D, H, W]
    KERNELS_3D = [
        [1, 1, 1],
        [2, 3, 3],
        [3, 5, 5],
    ]

    TEMPLATE_LIST = [
        "conv2d",
        "conv2d_bias",
        "conv2d_relu",
        "conv2d_relu6",
        "conv2d_relu_n1_to_1",
        "conv2d_tanh",
        "depthwise_conv2d",
        "depthwise_conv2d_bias",
        "transpose_conv2d",
    ]

    TEMPLATE_LIST_CONV3D = [
        "conv3d",
        "conv3d_bias",
    ]

    for t in TEMPLATE_LIST:
        for k in KERNELS:
            testName = "{}_{}x{}".format(t, k[0], k[1])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # The 2D kernel sizes above don't apply to conv3d, which needs kernels with
    # more than two dimensions.
    for t in TEMPLATE_LIST_CONV3D:
        for k in KERNELS_3D:
            testName = "{}_{}x{}x{}".format(t, k[0], k[1], k[2])
            TF_OP_LIST[testName] = TF_OP_LIST["{}_TEMPLATE".format(t)].copy()
            TF_OP_LIST[testName]["filter"] = k
            TF_OP_LIST[testName]["template"] = False

    # Delete any templates after having created any dynamic ops
    # This is a two-pass operation because it's bad practice to delete
    # keys from dictionaries while iterating
    keyList = []
    for k in TF_OP_LIST:
        try:
            if TF_OP_LIST[k]["template"]:
                keyList.append(k)
                continue
        except KeyError:
            pass

    for k in keyList:
        del TF_OP_LIST[k]


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--seed", dest="random_seed", default=42, type=int, help="Random seed"
    )
    parser.add_argument(
        "--random-shapes",
        dest="random_shapes",
        default=0,
        type=int,
        help=(
            "Use N random shapes of each rank for generating tests, "
            "seeded with random seed"
        ),
    )
    parser.add_argument(
        "-o",
        "--output-dir",
        dest="output_dir",
        default=".",
        type=str,
        help="Test output directory path prefix",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        dest="quiet",
        default=False,
        action="store_true",
        help="Do not print test names",
    )
    parser.add_argument(
        "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
    )
    parser.add_argument(
        "-m",
        "--tflite-kernel-mode",
        dest="tflite_kernel_mode",
        type=str,
        choices=["reference", "optimized"],
        default="reference",
        help="TFLite interpreter kernel mode",
    )
    parser.add_argument(
        "--num-samples",
        dest="num_samples",
        default=200,
        type=int,
        help="Number of input samples for post-training quantization",
    )
    parser.add_argument(
        "--filter",
        dest="filter",
        default="",
        type=str,
        help="Filter test names by this expression",
    )
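
    # Example (illustrative): running with "-o ./frameworks_tests --filter conv2d"
    # generates only the tests whose names match "conv2d" and writes them under
    # ./frameworks_tests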
    args = parser.parse_args()

    # Turn the filter into a re object if present
    filter = None
    if args.filter != "":
        filter = re.compile(args.filter)

    # Autodetect CPU count
    if args.jobs <= 0:
        args.jobs = os.cpu_count()

    # Disable TF info messages
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    try:
        os.makedirs(args.output_dir)
    except FileExistsError:
        pass

    if args.random_shapes:
        gen_rand_shapes(args)

    # Build dynamic ops
    createDynamicOpLists()

    # Generate the test list and arguments to run_unit_test()
    unit_test_args = []

    for op in TF_OP_LIST:
        generate_op_tests(args, op, shape_list, "result", filter, unit_test_args)

    errors = 0
    for t in unit_test_args:
        if not run_unit_test(*t):
            errors = errors + 1

    if not args.quiet:
        print("\nAll tasks done - with {} errors".format(errors))

    return 1 if errors else 0


if __name__ == "__main__":
    exit(main())