blob: 4bf8070fa31a174ec277afedd2a45ada05a25dae [file] [log] [blame]
TatWai Chongbef907a2024-01-23 09:40:37 -08001# Copyright (c) 2020-2024, ARM Limited.
Jeremy Johnson015c3552022-02-23 12:15:03 +00002# SPDX-License-Identifier: Apache-2.0
3import numpy as np
4import tensorflow as tf
5from frameworks.tensor_gen import TGen
6
7
8class TBuilder:
9 """The member functions build the tensorflow operators into small networks
10 for our tests"""
11
    def __init__(self):
        # TBuilder holds no state; each nested operator class carries its
        # own configuration, so construction is a no-op.
        pass
14
15 def fake_quant(tensor, tensor_scale, name):
16 """Helper function for quantizing with a scaling parameters structure."""
17 return tf.quantization.fake_quant_with_min_max_args(
18 tensor,
19 min=tensor_scale.min,
20 max=tensor_scale.max,
21 num_bits=tensor_scale.num_bits,
22 narrow_range=tensor_scale.narrow_range,
23 name=name,
24 )
25
26 def fake_quant_params(tensor, min, max, scaling, name):
27 """Helper function for quantizing with individual scaling parameters."""
28 return tf.quantization.fake_quant_with_min_max_args(
29 tensor,
30 min=min,
31 max=max,
32 num_bits=scaling.num_bits,
33 narrow_range=scaling.narrow_range,
34 name=name,
35 )
36
37 class Add:
38 def __init__(self, name):
39 self.result_name = name
40
41 def eval(self, a, b):
42 return tf.add(a, b, name=self.result_name)
43
44 class Sub:
45 def __init__(self, name):
46 self.result_name = name
47
48 def eval(self, a, b):
49 return tf.subtract(a, b, name=self.result_name)
50
51 class Mul:
52 def __init__(self, name):
53 self.result_name = name
54
55 def eval(self, a, b):
56 return tf.multiply(a, b, name=self.result_name)
57
58 class Exp:
59 def __init__(self, name):
60 self.result_name = name
61
62 def eval(self, a):
63 return tf.exp(a, name=self.result_name)
64
65 class Rcp:
66 def __init__(self, name):
67 self.result_name = name
68
69 def eval(self, a):
70 return tf.math.reciprocal(a, name=self.result_name)
71
72 class Relu:
73 def __init__(self, name):
74 self.result_name = name
75
76 def eval(self, a):
77 return tf.nn.relu(a, name=self.result_name)
78
Jerry Ge93912432022-07-22 10:29:13 -070079 class Relu1:
80 def __init__(self, name):
81 self.result_name = name
82
83 def eval(self, a):
84 # TF doesn't have relu_n1_to_1 operator,
85 # use min and max as a workaround
86 # alternatively, we can use clip_by_value
87 return tf.math.minimum(1.0, tf.math.maximum(-1.0, a))
88
Jerry Ge2eea5bf2022-10-11 16:27:05 +000089 class Relu0To1:
90 def __init__(self, name):
91 self.result_name = name
92
93 def eval(self, a):
94 # TF doesn't have relu_0_to_1 operator,
95 # use min and max as a workaround
96 # alternatively, we can use clip_by_value
97 return tf.math.minimum(1.0, tf.math.maximum(0.0, a))
98
Jeremy Johnson015c3552022-02-23 12:15:03 +000099 class Relu6:
100 def __init__(self, name):
101 self.result_name = name
102
103 def eval(self, a):
104 return tf.nn.relu6(a, name=self.result_name)
105
106 class LeakyRelu:
107 def __init__(self, alpha, name):
108 self.alpha = alpha
109 self.result_name = name
110
111 def eval(self, a):
112 return tf.nn.leaky_relu(a, alpha=self.alpha, name=self.result_name)
113
TatWai Chong41a04fe2022-11-03 21:44:32 +0000114 class Prelu:
115 def __init__(self, name):
116 self.result_name = name
117 self.prelu = tf.keras.layers.PReLU(
118 alpha_initializer=tf.keras.initializers.RandomNormal(
119 mean=0.0, stddev=1.0
120 )
121 )
122
123 def eval(self, a):
124 return self.prelu(a)
125
TatWai Chong473eb382022-08-02 04:21:30 +0000126 class Gelu:
127 def __init__(self, name):
128 self.result_name = name
129
130 def eval(self, a):
131 return tf.nn.gelu(a, name=self.result_name)
132
Jeremy Johnson015c3552022-02-23 12:15:03 +0000133 class Concat:
134 def __init__(self, axis, name):
135 self.axis = axis
136 self.result_name = name
137
138 def eval(self, a, b):
Won Jeonf9c0cee2023-09-18 16:32:45 -0700139 return (
140 tf.concat([a, b], self.axis, name=self.result_name)
141 if a.shape != ()
142 else tf.stack([a, b], name=self.result_name)
143 )
Jeremy Johnson015c3552022-02-23 12:15:03 +0000144
145 class BitwiseAnd:
146 def __init__(self, name):
147 self.result_name = name
148
149 def eval(self, a, b):
150 return tf.bitwise.bitwise_and(a, b, name=self.result_name)
151
152 class BitwiseOr:
153 def __init__(self, name):
154 self.result_name = name
155
156 def eval(self, a, b):
157 return tf.bitwise.bitwise_or(a, b, name=self.result_name)
158
159 class BitwiseNot:
160 def __init__(self, name):
161 self.result_name = name
162
163 def eval(self, a):
164 return tf.bitwise.invert(a, name=self.result_name)
165
166 class BitwiseXor:
167 def __init__(self, name):
168 self.result_name = name
169
170 def eval(self, a, b):
171 return tf.bitwise.bitwise_xor(a, b, name=self.result_name)
172
173 class LogicalAnd:
174 def __init__(self, name):
175 self.result_name = name
176
177 def eval(self, a, b):
178 return tf.math.logical_and(a, b, name=self.result_name)
179
180 class LogicalOr:
181 def __init__(self, name):
182 self.result_name = name
183
184 def eval(self, a, b):
185 return tf.math.logical_or(a, b, name=self.result_name)
186
187 class LogicalNot:
188 def __init__(self, name):
189 self.result_name = name
190
191 def eval(self, a):
192 return tf.math.logical_not(a, name=self.result_name)
193
194 class ReduceAny:
195 def __init__(self, axis_list, keepdims, name):
196 self.axis_list = axis_list
197 self.keepdims = keepdims
198 self.result_name = name
199
200 def eval(self, a):
201 return tf.math.reduce_any(
202 a, self.axis_list, keepdims=self.keepdims, name=self.result_name
203 )
204
205 class ReduceAll:
206 def __init__(self, axis_list, keepdims, name):
207 self.axis_list = axis_list
208 self.keepdims = keepdims
209 self.result_name = name
210
211 def eval(self, a):
212 return tf.math.reduce_all(
213 a, self.axis_list, keepdims=self.keepdims, name=self.result_name
214 )
215
216 class ReduceMin:
217 def __init__(self, axis_list, keepdims, name):
218 self.axis_list = axis_list
219 self.keepdims = keepdims
220 self.result_name = name
221
222 def eval(self, a):
223 return tf.math.reduce_min(
224 a, self.axis_list, keepdims=self.keepdims, name=self.result_name
225 )
226
227 class ReduceMax:
228 def __init__(self, axis_list, keepdims, name):
229 self.axis_list = axis_list
230 self.keepdims = keepdims
231 self.result_name = name
232
233 def eval(self, a):
234 return tf.math.reduce_max(
235 a, self.axis_list, keepdims=self.keepdims, name=self.result_name
236 )
237
238 class ReduceSum:
239 def __init__(self, axis_list, keepdims, name):
240 self.axis_list = axis_list
241 self.keepdims = keepdims
242 self.result_name = name
243
244 def eval(self, a):
245 return tf.math.reduce_sum(
246 a, self.axis_list, keepdims=self.keepdims, name=self.result_name
247 )
248
249 class ReduceMean:
250 def __init__(self, axis_list, keepdims, name):
251 self.axis_list = axis_list
252 self.keepdims = keepdims
253 self.result_name = name
254
255 def eval(self, a):
256 return tf.math.reduce_mean(
257 a, self.axis_list, keepdims=self.keepdims, name=self.result_name
258 )
259
260 class ReduceProduct:
261 def __init__(self, axis_list, keepdims, name):
262 self.axis_list = axis_list
263 self.keepdims = keepdims
264 self.result_name = name
265
266 def eval(self, a):
267 return tf.math.reduce_prod(
268 a, self.axis_list, keepdims=self.keepdims, name=self.result_name
269 )
270
271 class Min:
272 def __init__(self, name):
273 self.result_name = name
274
275 def eval(self, a, b):
276 return tf.math.minimum(a, b, name=self.result_name)
277
278 class Max:
279 def __init__(self, name):
280 self.result_name = name
281
282 def eval(self, a, b):
283 return tf.math.maximum(a, b, name=self.result_name)
284
285 class Pow:
286 def __init__(self, name):
287 self.result_name = name
288
289 def eval(self, a, b):
290 return tf.math.pow(a, b, name=self.result_name)
291
292 class Abs:
293 def __init__(self, name):
294 self.result_name = name
295
296 def eval(self, a):
297 return tf.math.abs(a, name=self.result_name)
298
299 class Ceil:
300 def __init__(self, name):
301 self.result_name = name
302
303 def eval(self, a):
304 return tf.math.ceil(a, name=self.result_name)
305
306 class Floor:
307 def __init__(self, name):
308 self.result_name = name
309
310 def eval(self, a):
311 return tf.math.floor(a, name=self.result_name)
312
313 class Log:
314 def __init__(self, name):
315 self.result_name = name
316
317 def eval(self, a):
318 return tf.math.log(a, name=self.result_name)
319
320 class Negate:
321 def __init__(self, name):
322 self.result_name = name
323
324 def eval(self, a):
325 return tf.math.negative(a, name=self.result_name)
326
327 class Rsqrt:
328 def __init__(self, name):
329 self.result_name = name
330
331 def eval(self, a):
332 return tf.math.rsqrt(a, name=self.result_name)
333
TatWai Chongd713a4d2022-11-10 13:54:28 -0800334 class Sign:
335 def __init__(self, name):
336 self.result_name = name
337
338 def eval(self, a):
339 return tf.math.sign(a, name=self.result_name)
340
Jeremy Johnson015c3552022-02-23 12:15:03 +0000341 class Sigmoid:
342 def __init__(self, name):
343 self.result_name = name
344
345 def eval(self, a):
346 return tf.math.sigmoid(a, name=self.result_name)
347
348 class Tanh:
349 def __init__(self, name):
350 self.result_name = name
351
352 def eval(self, a):
353 return tf.math.tanh(a, name=self.result_name)
354
Won Jeon78155c62023-06-10 00:20:04 +0000355 class Erf:
356 # tfl.ops cannot be generated right now.
357 # https://github.com/tensorflow/tensorflow/issues/60809
358 def __init__(self, name):
359 self.result_name = name
360
361 def eval(self, a):
362 return tf.math.erf(a, name=self.result_name)
363
Luke Hutton41601862022-12-06 17:29:15 +0000364 class Sin:
365 def __init__(self, name):
366 self.result_name = name
367
368 def eval(self, a):
369 return tf.math.sin(a, name=self.result_name)
370
371 class Cos:
372 def __init__(self, name):
373 self.result_name = name
374
375 def eval(self, a):
376 return tf.math.cos(a, name=self.result_name)
377
Luke Hutton2138a192022-12-15 11:01:39 +0000378 class Atan2:
379 def __init__(self, name):
380 self.result_name = name
381
382 def eval(self, a, b):
383 return tf.math.atan2(a, b, name=self.result_name)
384
Jeremy Johnson015c3552022-02-23 12:15:03 +0000385 class Square:
386 def __init__(self, name):
387 self.result_name = name
388
389 def eval(self, a):
390 return tf.math.square(a, name=self.result_name)
391
392 class SquaredDifference:
393 def __init__(self, name):
394 self.result_name = name
395
396 def eval(self, a, b):
397 return tf.math.squared_difference(a, b, name=self.result_name)
398
399 class Equal:
400 def __init__(self, name):
401 self.result_name = name
402
403 def eval(self, a, b):
404 return tf.math.equal(a, b, name=self.result_name)
405
406 class GreaterEqual:
407 def __init__(self, name):
408 self.result_name = name
409
410 def eval(self, a, b):
411 return tf.math.greater_equal(a, b, name=self.result_name)
412
413 class Greater:
414 def __init__(self, name):
415 self.result_name = name
416
417 def eval(self, a, b):
418 return tf.math.greater(a, b, name=self.result_name)
419
420 class Less:
421 def __init__(self, name):
422 self.result_name = name
423
424 def eval(self, a, b):
425 return tf.math.less(a, b, name=self.result_name)
426
427 class LessEqual:
428 def __init__(self, name):
429 self.result_name = name
430
431 def eval(self, a, b):
432 return tf.math.less_equal(a, b, name=self.result_name)
433
434 class Conv2d:
435 def __init__(self, weight, strides, padding, dilations, name):
436 self.weight = weight
437 self.strides = strides
438 self.padding = padding
439 self.dilations = dilations
440 self.result_name = name
441
442 def eval(self, input):
443 return tf.nn.conv2d(
444 input,
445 self.weight,
446 self.strides,
447 self.padding,
448 data_format="NHWC",
449 dilations=self.dilations,
450 name=self.result_name,
451 )
452
453 class Conv2dRelu:
454 def __init__(self, weight, name):
455 self.weight = weight
456 self.result_name = name
457
458 def eval(self, input):
459 conv2d = tf.nn.conv2d(
460 input,
461 self.weight,
462 [1, 1, 1, 1],
463 "SAME",
464 data_format="NHWC",
465 dilations=[1, 1, 1, 1],
466 name="conv2d",
467 )
468 return tf.nn.relu(conv2d, name=self.result_name)
469
470 class Conv2dRelu6:
471 def __init__(self, weight, name):
472 self.weight = weight
473 self.result_name = name
474
475 def eval(self, input):
476 conv2d = tf.nn.conv2d(
477 input,
478 self.weight,
479 [1, 1, 1, 1],
480 "SAME",
481 data_format="NHWC",
482 dilations=[1, 1, 1, 1],
483 name="conv2d",
484 )
485 return tf.nn.relu6(conv2d, name=self.result_name)
486
487 class Conv2dReluN1To1:
488 def __init__(self, weight, name):
489 self.weight = weight
490 self.result_name = name
491
492 def eval(self, input):
493 conv2d = tf.nn.conv2d(
494 input,
495 self.weight,
496 [1, 1, 1, 1],
497 "SAME",
498 data_format="NHWC",
499 dilations=[1, 1, 1, 1],
500 name="conv2d",
501 )
502 return tf.clip_by_value(conv2d, -1.0, 1.0, name=self.result_name)
503
504 class Conv2dTanh:
505 def __init__(self, weight, name):
506 self.weight = weight
507 self.result_name = name
508
509 def eval(self, input):
510 conv2d = tf.nn.conv2d(
511 input,
512 self.weight,
513 [1, 1, 1, 1],
514 "SAME",
515 data_format="NHWC",
516 dilations=[1, 1, 1, 1],
517 name="conv2d",
518 )
519 return tf.math.tanh(conv2d, name=self.result_name)
520
521 class Conv2dWithBias:
522 def __init__(self, weight, bias, strides, padding, dilations, name):
523 self.weight = weight
524 self.bias = bias
525 self.strides = strides
526 self.padding = padding
527 self.dilations = dilations
528 self.result_name = name
529
530 def eval(self, input):
531 conv2d_op = tf.nn.conv2d(
532 input,
533 self.weight,
534 self.strides,
535 self.padding,
536 data_format="NHWC",
537 dilations=self.dilations,
538 name="conv2d",
539 )
540 bias_add_op = tf.nn.bias_add(
541 conv2d_op, self.bias, data_format="NHWC", name=self.result_name
542 )
543 return bias_add_op
544
TatWai Chongfd629052022-07-25 04:01:58 +0000545 class Conv3d:
546 def __init__(self, weight, strides, padding, dilations, name):
547 self.weight = weight
548 self.strides = strides
549 self.padding = padding
550 self.dilations = dilations
551 self.result_name = name
552
553 def eval(self, input):
554 return tf.nn.conv3d(
555 input,
556 self.weight,
557 self.strides,
558 self.padding,
559 data_format="NDHWC",
560 dilations=self.dilations,
561 name=self.result_name,
562 )
563
564 class Conv3dWithBias:
565 def __init__(self, weight, bias, strides, padding, dilations, name):
566 self.weight = weight
567 self.bias = bias
568 self.strides = strides
569 self.padding = padding
570 self.dilations = dilations
571 self.result_name = name
572
573 def eval(self, input):
574 conv3d_op = tf.nn.conv3d(
575 input,
576 self.weight,
577 self.strides,
578 self.padding,
579 data_format="NDHWC",
580 dilations=self.dilations,
581 name="conv3d",
582 )
583 bias_add_op = tf.nn.bias_add(conv3d_op, self.bias, name=self.result_name)
584 return bias_add_op
585
Jeremy Johnson015c3552022-02-23 12:15:03 +0000586 class DepthwiseConv2d:
587 def __init__(self, weight, strides, padding, dilations, name):
588 self.weight = weight
589 self.strides = strides
590 self.padding = padding
591 self.dilations = dilations
592 self.result_name = name
593
594 def eval(self, input):
595 dws_conv2d = tf.nn.depthwise_conv2d(
596 input,
597 self.weight,
598 self.strides,
599 self.padding,
600 data_format="NHWC",
601 dilations=self.dilations,
602 name="dws_conv2d",
603 )
604 return tf.identity(dws_conv2d, name=self.result_name)
605
606 class DepthwiseConv2dWithBias:
607 def __init__(self, weight, bias, strides, padding, dilations, name):
608 self.weight = weight
609 self.bias = bias
610 self.strides = strides
611 self.padding = padding
612 self.dilations = dilations
613 self.result_name = name
614
615 def eval(self, input):
616 dws_conv2d = tf.nn.depthwise_conv2d(
617 input,
618 self.weight,
619 self.strides,
620 self.padding,
621 data_format="NHWC",
622 dilations=self.dilations,
623 name="dws_conv2d",
624 )
625 bias_add_op = tf.nn.bias_add(
626 dws_conv2d, self.bias, data_format="NHWC", name=self.result_name
627 )
628 return bias_add_op
629
630 class TransposeConv2d:
631 def __init__(self, weight, output_shape, strides, padding, name):
632 self.weight = weight
633 self.output_shape = output_shape
634 self.strides = strides
635 self.padding = padding
636 self.result_name = name
637
638 def eval(self, input):
639 return tf.nn.conv2d_transpose(
640 input,
641 self.weight,
642 self.output_shape,
643 self.strides,
644 self.padding,
645 data_format="NHWC",
646 name=self.result_name,
647 )
648
649 class Argmax:
650 def __init__(self, axis, name):
651 self.axis = axis
652 self.result_name = name
653
654 def eval(self, a):
655 return tf.argmax(a, self.axis, output_type=tf.int32, name=self.result_name)
656
657 class AvgPool2d:
658 def __init__(self, strides, kernel_size, padding, name):
659 self.strides = strides
660 self.kernel_size = kernel_size
661 self.padding = padding
662 self.result_name = name
663
664 def eval(self, input):
665 return tf.nn.avg_pool2d(
666 input,
667 strides=self.strides,
668 ksize=self.kernel_size,
669 padding=self.padding,
670 data_format="NHWC",
671 name=self.result_name,
672 )
673
674 class MaxPool2d:
675 def __init__(self, strides, kernel_size, padding, name):
676 self.strides = strides
677 self.kernel_size = kernel_size
678 self.padding = padding
679 self.result_name = name
680
681 def eval(self, input):
682 return tf.nn.max_pool2d(
683 input,
684 strides=self.strides,
685 ksize=self.kernel_size,
686 padding=self.padding,
687 data_format="NHWC",
688 name=self.result_name,
689 )
690
691 class Reshape:
692 def __init__(self, shape, name):
693 self.shape = shape
694 self.result_name = name
695
696 def eval(self, a):
697 reshape_op = tf.reshape(a, self.shape)
698 return tf.identity(reshape_op, name=self.result_name)
699
700 class Transpose:
701 def __init__(self, perm, name):
702 self.perm = perm
703 self.result_name = name
704
705 def eval(self, a):
706 return tf.transpose(a, self.perm, name=self.result_name)
707
708 class Slice:
709 def __init__(self, begin, size, name):
710 self.begin = begin
711 self.size = size
712 self.result_name = name
713
714 def eval(self, a):
715 return tf.slice(a, begin=self.begin, size=self.size, name=self.result_name)
716
717 class StridedSlice:
718 def __init__(
719 self,
720 begin,
721 end,
722 strides,
723 begin_mask,
724 end_mask,
725 ellipsis_mask,
726 new_axis_mask,
727 shrink_axis_mask,
728 name,
729 ):
730 self.begin = begin
731 self.end = end
732 self.strides = strides
733 self.begin_mask = begin_mask
734 self.end_mask = end_mask
735 self.ellipsis_mask = ellipsis_mask
736 self.new_axis_mask = new_axis_mask
737 self.shrink_axis_mask = shrink_axis_mask
738 self.result_name = name
739
740 def eval(self, a):
741 return tf.strided_slice(
742 a,
743 begin=self.begin,
744 end=self.end,
745 strides=self.strides,
746 begin_mask=self.begin_mask,
747 end_mask=self.end_mask,
748 ellipsis_mask=self.ellipsis_mask,
749 new_axis_mask=self.new_axis_mask,
750 shrink_axis_mask=self.shrink_axis_mask,
751 name=self.result_name,
752 )
753
754 class Select:
755 def __init__(self, name):
756 self.result_name = name
757
758 def eval(self, selector, a, b):
759 return tf.where(condition=selector, x=a, y=b, name=self.result_name)
760
761 class Addn:
762 def __init__(self, name):
763 self.result_name = name
764
765 def eval(self, a, b, c, d):
766 return tf.add_n([a, b, c, d], name=self.result_name)
767
768 class Concatv2:
769 def __init__(self, axis, name):
770 self.axis = axis
771 self.result_name = name
772
773 def eval(self, a, b, c, d):
Won Jeonf9c0cee2023-09-18 16:32:45 -0700774 return (
775 tf.concat([a, b, c, d], axis=self.axis, name=self.result_name)
776 if a.shape != ()
777 else tf.stack([a, b, c, d], name=self.result_name)
778 )
Jeremy Johnson015c3552022-02-23 12:15:03 +0000779
780 class Stack:
781 def __init__(self, axis, name):
782 self.axis = axis
783 self.result_name = name
784
785 def eval(self, a, b, c, d):
786 return tf.stack([a, b, c, d], axis=self.axis, name=self.result_name)
787
788 class Unstack:
789 def __init__(self, axis, name):
790 self.axis = axis
791 self.result_name = name
792
793 def eval(self, a):
794 unstack_op = tf.unstack(a, axis=self.axis, name="unstack_op")
795 result_count = a.shape[self.axis]
796
797 if result_count == 1:
798 return tf.identity(unstack_op[0], name=self.result_name)
799
800 sums = []
801 for i in range(result_count):
802 sums.append(
803 tf.math.reduce_sum(unstack_op[i], name="reduce_{}".format(i))
804 )
805 return tf.stack(sums, 0, name=self.result_name)
806
TatWai Chongf7008da2022-09-09 09:35:40 +0000807 class MirrorPad:
808 def __init__(self, padding, mode, name):
809 self.padding = padding
810 self.mode = mode
811 self.result_name = name
812
813 def eval(self, a):
814 return tf.pad(
815 a,
816 self.padding,
817 mode=self.mode,
818 constant_values=0,
819 name=self.result_name,
820 )
821
Jeremy Johnson015c3552022-02-23 12:15:03 +0000822 class Pad:
TatWai Chong2226f902023-02-22 18:38:01 -0800823 def __init__(self, padding, pad_const, name):
Jeremy Johnson015c3552022-02-23 12:15:03 +0000824 self.padding = padding
TatWai Chong2226f902023-02-22 18:38:01 -0800825 self.pad_const = pad_const
Jeremy Johnson015c3552022-02-23 12:15:03 +0000826 self.result_name = name
827
828 def eval(self, a):
829 return tf.pad(
830 a,
831 self.padding,
832 mode="CONSTANT",
TatWai Chong2226f902023-02-22 18:38:01 -0800833 constant_values=self.pad_const,
Jeremy Johnson015c3552022-02-23 12:15:03 +0000834 name=self.result_name,
835 )
836
837 class ExpandDims:
838 def __init__(self, axis, name):
839 self.axis = axis
840 self.result_name = name
841
842 def eval(self, a):
843 return tf.expand_dims(a, self.axis, name=self.result_name)
844
845 class Shape:
846 def __init__(self, name):
847 self.result_name = name
848
849 def eval(self, a):
850 return tf.shape(a, name=self.result_name)
851
852 class Rank:
853 def __init__(self, name):
854 self.result_name = name
855
856 def eval(self, a):
857 return tf.rank(a, name=self.result_name)
858
859 class Fill:
860 def __init__(self, shape, value, name):
861 self.shape = shape
862 self.value = value
863 self.result_name = name
864
865 def eval(self, a):
866 return tf.fill(self.shape, self.value, name=self.result_name)
867
868 class Elu:
869 def __init__(self, name):
870 self.result_name = name
871
872 def eval(self, a):
873 return tf.nn.elu(a, name=self.result_name)
874
875 class Softmax:
876 def __init__(self, name):
877 self.result_name = name
878
879 def eval(self, a):
880 return tf.nn.softmax(a, name=self.result_name)
881
882 class LogSoftmax:
883 def __init__(self, name):
884 self.result_name = name
885
886 def eval(self, a):
887 return tf.nn.log_softmax(a, name=self.result_name)
888
Jerry Ge28811d92023-12-05 00:53:26 +0000889 class DynamicLinear:
890 def __init__(self, dynamic_input_shape, name):
891 self.result_name = name
892 self.model = tf.keras.Sequential(
893 [
894 tf.keras.layers.Input(shape=dynamic_input_shape),
895 tf.keras.layers.Dense(units=5),
896 ]
897 )
898
899 def eval(self, a):
900 return self.model(a)
901
Jeremy Johnson015c3552022-02-23 12:15:03 +0000902 class MatMul:
903 def __init__(self, name):
904 self.result_name = name
905
906 def eval(self, a, b):
907 return tf.linalg.matmul(a, b, name=self.result_name)
908
909 class AddScalar:
910 def __init__(self, name):
911 self.result_name = name
912
913 def eval(self, a):
914 return tf.add(a, 1, name=self.result_name)
915
916 class Add1d:
917 def __init__(self, name):
918 self.result_name = name
919
920 def eval(self, a, b):
921 if len(b.shape) > 1:
922 b_1d = tf.reduce_sum(b, axis=list(range(0, len(b.shape) - 1, 1)))
923 else:
924 b_1d = b
925 return tf.add(a, b_1d, name=self.result_name)
926
927 class Split:
928 def __init__(self, num_splits, axis, name):
929 self.num_splits = num_splits
930 self.axis = axis
931 self.result_name = name
932
933 def eval(self, a):
934 # The split op generates a list of outputs. Since we have difficulty
935 # serializing a list or array of Numpy arrays, we will reduce each of
936 # the results
937
938 if not isinstance(self.num_splits, list):
939 split_op = tf.split(
940 a, num_or_size_splits=self.num_splits, axis=self.axis, name="split"
941 )
942 result_count = self.num_splits
943 else:
944 num_split = np.asarray(self.num_splits, dtype=np.int32)
945 split_vec_op = tf.compat.v1.constant(
946 num_split,
947 shape=num_split.shape,
948 dtype=tf.int32,
949 name="const_split_vec",
950 )
951 split_op = tf.split(
952 a, num_or_size_splits=split_vec_op, axis=self.axis, name="split"
953 )
954 result_count = num_split.shape[0]
955
956 sums = []
957 for i in range(result_count):
958 sums.append(tf.math.reduce_sum(split_op[i], name="reduce_{}".format(i)))
959 return tf.stack(sums, 0, name=self.result_name)
960
961 class Tile:
962 def __init__(self, multiples, name):
963 self.multiples = multiples
964 self.result_name = name
965
966 def eval(self, a):
967 t = tf.tile(a, self.multiples, name="tile")
968 return tf.identity(t, name=self.result_name)
969
970 class Reverse:
971 def __init__(self, axis, name):
972 self.axis = axis
973 self.result_name = name
974
975 def eval(self, a):
976 return tf.reverse(a, [self.axis], name=self.result_name)
977
978 class Gather:
979 def __init__(self, indices, batch_dims, axis, name):
980 self.indices = indices
981 self.batch_dims = batch_dims
982 self.axis = axis
983 self.result_name = name
984
985 def eval(self, a):
986 return tf.gather(
987 a,
988 self.indices,
989 batch_dims=self.batch_dims,
990 axis=self.axis,
991 name=self.result_name,
992 )
993
994 class GatherNd:
995 def __init__(self, indices, name):
996 self.indices = indices
997 self.result_name = name
998
999 def eval(self, a):
1000 return tf.gather_nd(a, self.indices, name=self.result_name)
1001
    class ScatterNd:
        """Scatter randomly generated updates into a zero tensor of `shape`.

        Indices and updates are generated inside eval() (see the comment
        below for why); only the scatter output depends on the input's dtype.
        """

        def __init__(self, shape, indices_shape, N, rng, name):
            self.shape = shape
            self.indices_shape = indices_shape
            # N is stored but not read in eval() — presumably consumed by the
            # argument-generation stage; confirm against the harness.
            self.N = N
            # rng: numpy Generator used for indices/updates (call order matters).
            self.rng = rng
            self.result_name = name

        def eval(self, a):

            # This operator is special. The indices and updates tensors really need
            # to be created together, but in the current structure of this tool there
            # is no way to do that before now. The number of updates is determined by
            # the indices, so we can really only create that after indices; but we
            # don't know the type at that time.
            #
            # Shapes are guaranteed deterministic, but we'll use our rng
            # copied from the arggen stage. It's possible that index and
            # update *values* will be non-deterministic.
            #
            # We take the tensor_tensor simply to get the dtype.

            shape_const = tf.constant(self.shape, tf.int32)

            # An update entry covers the trailing dims of `shape` not indexed
            # by the last dimension of the indices tensor.
            updates_shape = list(self.indices_shape[:-1])
            updates_shape.extend(self.shape[self.indices_shape[-1] :])

            updates_const = tf.constant(TGen.getRand(updates_shape, a.dtype, self.rng))

            indices = np.zeros(self.indices_shape, dtype=np.int32)

            # We need to generate the random indices tensor based on the
            # limits of 'shape' for each dimension. Surely, there is a faster
            # vectorized way to do this, but the tensors are fairly small so we
            # will do this one element at a time. Each element needs to be sized based
            # on the size of the last dimension.
            for idx in np.ndindex(indices.shape):
                indices[idx] = self.rng.integers(0, self.shape[idx[-1]], size=1)[0]
                # print('{} {}'.format(idx, indices[idx]))

            indices_const = tf.constant(indices, dtype=tf.int32)

            return tf.scatter_nd(
                indices=indices_const,
                updates=updates_const,
                shape=shape_const,
                name=self.result_name,
            )
1050
1051 class SpaceToBatch:
1052 def __init__(self, block_shape, padding, name):
1053 self.block_shape = block_shape
1054 self.padding = padding
1055 self.result_name = name
1056
1057 def eval(self, a):
1058 return tf.space_to_batch(
1059 a, self.block_shape, self.padding, name=self.result_name
1060 )
1061
TatWai Chongbef907a2024-01-23 09:40:37 -08001062 class DynamicSpaceToBatch:
1063 def __init__(self, block_shape, padding, dynamic_input_shape, name):
1064 self.result_name = name
1065
1066 dynamic_input_shape_with_batch = list(dynamic_input_shape)
1067 dynamic_input_shape_no_batch = dynamic_input_shape_with_batch[1:]
1068 dynamic_input_shape_no_batch = tuple(dynamic_input_shape_no_batch)
1069
1070 self.model = tf.keras.Sequential(
1071 [
1072 tf.keras.layers.Input(shape=dynamic_input_shape_no_batch),
1073 tf.keras.layers.Lambda(
1074 lambda x: tf.space_to_batch(x, block_shape, padding, name=None)
1075 ),
1076 ]
1077 )
1078
1079 def eval(self, a):
1080 return self.model(a)
1081
Jeremy Johnson015c3552022-02-23 12:15:03 +00001082 class BatchToSpace:
1083 def __init__(self, block_shape, cropping, name):
1084 self.block_shape = block_shape
1085 self.cropping = cropping
1086 self.result_name = name
1087
1088 def eval(self, a):
1089 # transpose to swap depth and batch first. this could avoid adding new shape
1090 block_rank = len(self.block_shape)
1091 perm = [len(a.shape) - 1]
1092 for i in range(block_rank):
1093 perm.append(i + 1)
1094 perm.append(0)
1095 transpose_op = tf.transpose(a, perm)
1096 return tf.batch_to_space(
1097 transpose_op, self.block_shape, self.cropping, name=self.result_name
1098 )
1099
Jerry Ge28811d92023-12-05 00:53:26 +00001100 class DynamicBatchToSpace:
1101 def __init__(self, block_shape, cropping, dynamic_input_shape, name):
1102 self.result_name = name
1103
1104 dynamic_input_shape_with_batch = list(dynamic_input_shape)
1105 dynamic_input_shape_no_batch = dynamic_input_shape_with_batch[1:]
1106 dynamic_input_shape_no_batch = tuple(dynamic_input_shape_no_batch)
1107
1108 self.model = tf.keras.Sequential(
1109 [
1110 tf.keras.layers.Input(shape=dynamic_input_shape_no_batch),
1111 tf.keras.layers.Lambda(
1112 lambda x: tf.batch_to_space(x, block_shape, cropping, name=None)
1113 ),
1114 ]
1115 )
1116
1117 def eval(self, a):
1118 return self.model(a)
1119
Jeremy Johnson015c3552022-02-23 12:15:03 +00001120 class SpaceToDepth:
1121 def __init__(self, block_shape, name):
1122 self.block_shape = block_shape
1123 self.result_name = name
1124
1125 def eval(self, a):
1126 return tf.nn.space_to_depth(a, self.block_shape, name=self.result_name)
1127
Jerry Ge28811d92023-12-05 00:53:26 +00001128 class DynamicSpaceToDepth:
1129 def __init__(self, dynamic_input_shape, name):
1130 self.result_name = name
1131
1132 dynamic_input_shape_with_batch = list(dynamic_input_shape)
1133 dynamic_input_shape_no_batch = dynamic_input_shape_with_batch[1:]
1134 dynamic_input_shape_no_batch = tuple(dynamic_input_shape_no_batch)
1135
1136 self.model = tf.keras.Sequential(
1137 [
1138 tf.keras.layers.Input(shape=dynamic_input_shape_no_batch),
1139 tf.keras.layers.Lambda(
1140 lambda x: tf.nn.space_to_depth(
1141 x, 2, data_format="NHWC", name=None
1142 )
1143 ),
1144 ]
1145 )
1146
1147 def eval(self, a):
1148 return self.model(a)
1149
Jeremy Johnson015c3552022-02-23 12:15:03 +00001150 class DepthToSpace:
1151 def __init__(self, block_shape, name):
1152 self.block_shape = block_shape
1153 self.result_name = name
1154
1155 def eval(self, a):
1156 return tf.nn.depth_to_space(a, self.block_shape, name=self.result_name)
1157
Jerry Ge28811d92023-12-05 00:53:26 +00001158 class DynamicDepthToSpace:
1159 def __init__(self, dynamic_input_shape, name):
1160 self.result_name = name
1161
1162 dynamic_input_shape_with_batch = list(dynamic_input_shape)
1163 dynamic_input_shape_no_batch = dynamic_input_shape_with_batch[1:]
1164 dynamic_input_shape_no_batch = tuple(dynamic_input_shape_no_batch)
1165
1166 self.model = tf.keras.Sequential(
1167 [
1168 tf.keras.layers.Input(shape=dynamic_input_shape_no_batch),
1169 tf.keras.layers.Lambda(
1170 lambda x: tf.nn.depth_to_space(
1171 x, 2, data_format="NHWC", name=None
1172 )
1173 ),
1174 ]
1175 )
1176
1177 def eval(self, a):
1178 return self.model(a)
1179
Jeremy Johnson015c3552022-02-23 12:15:03 +00001180 class OneHot:
1181 def __init__(self, depth, axis, name):
1182 self.depth = depth
1183 self.axis = axis
1184 self.result_name = name
1185
1186 def eval(self, indices, on_value, off_value):
1187 return tf.one_hot(
1188 indices,
1189 self.depth,
1190 on_value,
1191 off_value,
1192 self.axis,
1193 on_value.dtype,
1194 self.result_name,
1195 )
1196
1197 class Fakequant:
1198 def __init__(self, num_bits, narrow_range, name):
1199 self.num_bits = num_bits
1200 self.narrow_range = narrow_range
1201 self.result_name = name
1202
1203 def eval(self, a):
1204 return tf.quantization.fake_quant_with_min_max_args(
1205 a,
1206 min=-2.0,
1207 max=2.0,
1208 num_bits=self.num_bits,
1209 narrow_range=self.narrow_range,
1210 name=self.result_name,
1211 )
1212
TatWai Chong0cef07e2023-02-27 13:22:52 -08001213 class Resize:
1214 def __init__(self, mode, align, half, scale, name):
Jeremy Johnson015c3552022-02-23 12:15:03 +00001215 self.result_name = name
TatWai Chong0cef07e2023-02-27 13:22:52 -08001216 self.mode = mode
1217 self.align = align
1218 self.half = half
1219 self.scale = scale
Jeremy Johnson015c3552022-02-23 12:15:03 +00001220
1221 def eval(self, a):
1222 out_shape = []
TatWai Chong0cef07e2023-02-27 13:22:52 -08001223 out_shape.append(a.shape[1] * self.scale)
1224 out_shape.append(a.shape[2] * self.scale)
Jeremy Johnson015c3552022-02-23 12:15:03 +00001225
TatWai Chong0cef07e2023-02-27 13:22:52 -08001226 tf_resize_dict = (
1227 {"tf_resize_func": tf.compat.v1.image.resize_nearest_neighbor}
1228 if (self.mode == "nearest")
1229 else {"tf_resize_func": tf.compat.v1.image.resize_bilinear}
1230 )
1231 resize = tf_resize_dict["tf_resize_func"](
Jeremy Johnson015c3552022-02-23 12:15:03 +00001232 a,
1233 out_shape,
TatWai Chong0cef07e2023-02-27 13:22:52 -08001234 align_corners=self.align,
Jeremy Johnson015c3552022-02-23 12:15:03 +00001235 name="resize",
TatWai Chong0cef07e2023-02-27 13:22:52 -08001236 half_pixel_centers=self.half,
TatWai Chongf7326092022-06-08 12:17:14 -07001237 )
1238 return tf.identity(resize, name=self.result_name)
1239
Jeremy Johnson015c3552022-02-23 12:15:03 +00001240 class LeftShift:
1241 def __init__(self, shift, name):
1242 self.shift = shift
1243 self.result_name = name
1244
1245 def eval(self, a):
1246 return tf.bitwise.left_shift(a, self.shift, name=self.result_name)
1247
1248 class RightShift:
1249 def __init__(self, shift, name):
1250 self.shift = shift
1251 self.result_name = name
1252
1253 def eval(self, a):
1254 return tf.bitwise.right_shift(a, self.shift, name=self.result_name)
Jerry Ge9e94af82022-10-27 09:57:00 -07001255
1256 class While:
1257 def __init__(self, name):
1258 self.result_name = name
1259
1260 def while_cond(self, x):
1261 return tf.reduce_sum(x) < self.cap
1262
1263 def while_body(self, x):
1264 return tf.add(x, tf.math.sigmoid(x))
1265
1266 def eval(self, a):
1267 self.cap = tf.cast(
1268 tf.constant(
1269 2.0,
1270 shape=[
1271 1,
1272 ],
1273 ),
1274 a.dtype,
1275 )
1276
1277 result = tf.while_loop(
1278 self.while_cond, self.while_body, [a], name=self.result_name
1279 )
1280
1281 return result[0]
1282
Tai Lycf84bc92023-09-07 20:49:09 +00001283 class LSTM(tf.Module):
Jerry Ge9e94af82022-10-27 09:57:00 -07001284 def __init__(self, name):
1285 self.result_name = name
1286 self.lstm = tf.keras.layers.LSTM(
1287 2,
1288 activation="tanh",
1289 unroll=False,
1290 recurrent_activation="sigmoid",
1291 use_bias=True,
1292 recurrent_initializer="ones",
1293 kernel_initializer="ones",
1294 )
1295
1296 def eval(self, a):
1297 return self.lstm(a)
1298
Tai Lycf84bc92023-09-07 20:49:09 +00001299 class SLSTM(tf.Module):
1300 def __init__(self, name):
1301 self.result_name = name
1302 self.lstm = tf.keras.layers.LSTM(
1303 2,
1304 stateful=True,
1305 activation="tanh",
1306 unroll=False,
1307 recurrent_activation="sigmoid",
1308 use_bias=True,
1309 recurrent_initializer="ones",
1310 kernel_initializer="ones",
1311 )
1312
1313 def eval(self, a):
1314 return self.lstm(a)
1315
Jerry Ge9e94af82022-10-27 09:57:00 -07001316 class GRU:
1317 def __init__(self, name):
1318 self.result_name = name
1319 self.lstm = tf.keras.layers.GRU(
1320 2,
1321 recurrent_activation="sigmoid",
1322 use_bias=True,
1323 recurrent_initializer="ones",
1324 kernel_initializer="ones",
1325 )
1326
1327 def eval(self, a):
1328 return self.lstm(a)
1329
1330 class RNN:
1331 def __init__(self, name):
1332 self.result_name = name
1333 basic_cell = tf.keras.layers.SimpleRNNCell(
1334 units=2,
1335 activation="sigmoid",
1336 use_bias=True,
1337 recurrent_initializer="ones",
1338 )
1339 self.rnn = tf.keras.layers.RNN(basic_cell, unroll=False)
1340
1341 def eval(self, a):
1342 return self.rnn(a)
1343
1344 class FullyConnected:
1345 def __init__(self, name):
1346 self.result_name = name
1347 self.dense = tf.keras.layers.Dense(2)
1348
1349 def eval(self, a):
1350 return self.dense(a)
Luke Hutton261b7b62023-01-10 14:50:31 +00001351
1352 class RFFT2d:
1353 def __init__(self, fft_length, name):
1354 self.fft_length = fft_length
1355 self.result_name = name
1356
1357 def eval(self, a):
1358 return tf.signal.rfft2d(a, self.fft_length, name=self.result_name)
Luke Hutton714aa602023-02-08 19:45:26 +00001359
1360 class Real:
1361 def __init__(self, name):
1362 self.result_name = name
1363
1364 def eval(self, a):
1365 return tf.math.real(a, name=self.result_name)
1366
1367 class Imag:
1368 def __init__(self, name):
1369 self.result_name = name
1370
1371 def eval(self, a):
1372 return tf.math.imag(a, name=self.result_name)
Tai Lyfe36fa92023-06-01 21:45:12 +00001373
1374 class BroadcastTo:
1375 def __init__(self, shape, name):
1376 self.shape = shape
1377 self.result_name = name
1378
1379 def eval(self, a):
1380 return tf.broadcast_to(a, shape=self.shape, name=self.result_name)
Tai Lycf84bc92023-09-07 20:49:09 +00001381
1382 class CallOnce(tf.Module):
1383 def __init__(self, name):
1384 print(tf.__version__)
1385 self.result_name = name
1386 self.var = tf.Variable([1.0])
1387
1388 @tf.function(
1389 input_signature=[
1390 tf.TensorSpec(
1391 shape=[
1392 1,
1393 ],
1394 dtype=tf.float32,
1395 )
1396 ]
1397 )
1398 def eval(self, a):
1399 return self.var.assign([2.0])