blob: 7b20cef73d75b79f7dede59020382688459f6cf1 [file] [log] [blame]
Jerry Ge9e94af82022-10-27 09:57:00 -07001# Copyright (c) 2020-2023, ARM Limited.
Jeremy Johnson015c3552022-02-23 12:15:03 +00002# SPDX-License-Identifier: Apache-2.0
3import numpy as np
4import tensorflow as tf
5from frameworks.tensor_gen import TGen
6
7
class TBuilder:
    """Builders that wrap single TensorFlow operators into small test networks.

    Each nested class represents one operator under test.  The constructor
    captures the operator's attributes plus the name to give the result
    tensor, and ``eval`` applies the operator to the input tensor(s) and
    returns the named result.  Keeping one tiny class per operator lets the
    test harness instantiate and serialize each network independently.
    """

    def __init__(self):
        pass

    # Fixed: these two helpers had no `self` parameter but were plain class
    # attributes, so calling them on an instance would mis-bind the first
    # argument.  @staticmethod keeps class-level calls working unchanged and
    # makes instance-level calls safe as well.
    @staticmethod
    def fake_quant(tensor, tensor_scale, name):
        """Helper function for quantizing with a scaling parameters structure."""
        return tf.quantization.fake_quant_with_min_max_args(
            tensor,
            min=tensor_scale.min,
            max=tensor_scale.max,
            num_bits=tensor_scale.num_bits,
            narrow_range=tensor_scale.narrow_range,
            name=name,
        )

    @staticmethod
    def fake_quant_params(tensor, min, max, scaling, name):
        """Helper function for quantizing with individual scaling parameters.

        NOTE: ``min``/``max`` shadow builtins, but the parameter names are
        kept for keyword-call compatibility with existing callers.
        """
        return tf.quantization.fake_quant_with_min_max_args(
            tensor,
            min=min,
            max=max,
            num_bits=scaling.num_bits,
            narrow_range=scaling.narrow_range,
            name=name,
        )

    class Add:
        """Elementwise addition."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.add(a, b, name=self.result_name)

    class Sub:
        """Elementwise subtraction."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.subtract(a, b, name=self.result_name)

    class Mul:
        """Elementwise multiplication."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.multiply(a, b, name=self.result_name)

    class Exp:
        """Elementwise exponential."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.exp(a, name=self.result_name)

    class Rcp:
        """Elementwise reciprocal."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.reciprocal(a, name=self.result_name)

    class Relu:
        """Standard ReLU activation."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.relu(a, name=self.result_name)

    class Relu1:
        """ReLU clamped to [-1, 1]."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            # TF doesn't have relu_n1_to_1 operator,
            # use min and max as a workaround
            # alternatively, we can use clip_by_value
            return tf.math.minimum(1.0, tf.math.maximum(-1.0, a))

    class Relu0To1:
        """ReLU clamped to [0, 1]."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            # TF doesn't have relu_0_to_1 operator,
            # use min and max as a workaround
            # alternatively, we can use clip_by_value
            return tf.math.minimum(1.0, tf.math.maximum(0.0, a))

    class Relu6:
        """ReLU clamped to [0, 6]."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.relu6(a, name=self.result_name)

    class LeakyRelu:
        """Leaky ReLU with a configurable negative-slope alpha."""

        def __init__(self, alpha, name):
            self.alpha = alpha
            self.result_name = name

        def eval(self, a):
            return tf.nn.leaky_relu(a, alpha=self.alpha, name=self.result_name)

    class Prelu:
        """Parametric ReLU with randomly initialized per-channel alpha."""

        def __init__(self, name):
            self.result_name = name
            self.prelu = tf.keras.layers.PReLU(
                alpha_initializer=tf.keras.initializers.RandomNormal(
                    mean=0.0, stddev=1.0
                )
            )

        def eval(self, a):
            return self.prelu(a)

    class Gelu:
        """Gaussian Error Linear Unit activation."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.gelu(a, name=self.result_name)

    class Concat:
        """Concatenate two tensors along an axis (stack for scalars)."""

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a, b):
            # Scalars cannot be concatenated, so fall back to stacking them.
            return (
                tf.concat([a, b], self.axis, name=self.result_name)
                if a.shape != ()
                else tf.stack([a, b], name=self.result_name)
            )

    class BitwiseAnd:
        """Elementwise bitwise AND."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.bitwise.bitwise_and(a, b, name=self.result_name)

    class BitwiseOr:
        """Elementwise bitwise OR."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.bitwise.bitwise_or(a, b, name=self.result_name)

    class BitwiseNot:
        """Elementwise bitwise NOT (invert)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.bitwise.invert(a, name=self.result_name)

    class BitwiseXor:
        """Elementwise bitwise XOR."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.bitwise.bitwise_xor(a, b, name=self.result_name)

    class LogicalAnd:
        """Elementwise logical AND."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.logical_and(a, b, name=self.result_name)

    class LogicalOr:
        """Elementwise logical OR."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.logical_or(a, b, name=self.result_name)

    class LogicalNot:
        """Elementwise logical NOT."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.logical_not(a, name=self.result_name)

    class ReduceAny:
        """Logical-OR reduction over the given axes."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_any(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceAll:
        """Logical-AND reduction over the given axes."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_all(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceMin:
        """Minimum reduction over the given axes."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_min(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceMax:
        """Maximum reduction over the given axes."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_max(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceSum:
        """Sum reduction over the given axes."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_sum(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceMean:
        """Mean reduction over the given axes."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_mean(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceProduct:
        """Product reduction over the given axes."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_prod(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class Min:
        """Elementwise minimum of two tensors."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.minimum(a, b, name=self.result_name)

    class Max:
        """Elementwise maximum of two tensors."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.maximum(a, b, name=self.result_name)

    class Pow:
        """Elementwise power."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.pow(a, b, name=self.result_name)

    class Abs:
        """Elementwise absolute value."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.abs(a, name=self.result_name)

    class Ceil:
        """Elementwise ceiling."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.ceil(a, name=self.result_name)

    class Floor:
        """Elementwise floor."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.floor(a, name=self.result_name)

    class Log:
        """Elementwise natural logarithm."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.log(a, name=self.result_name)

    class Negate:
        """Elementwise negation."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.negative(a, name=self.result_name)

    class Rsqrt:
        """Elementwise reciprocal square root."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.rsqrt(a, name=self.result_name)

    class Sign:
        """Elementwise sign."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.sign(a, name=self.result_name)

    class Sigmoid:
        """Elementwise sigmoid."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.sigmoid(a, name=self.result_name)

    class Tanh:
        """Elementwise hyperbolic tangent."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.tanh(a, name=self.result_name)

    class Erf:
        """Elementwise Gauss error function."""

        # tfl.ops cannot be generated right now.
        # https://github.com/tensorflow/tensorflow/issues/60809
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.erf(a, name=self.result_name)

    class Sin:
        """Elementwise sine."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.sin(a, name=self.result_name)

    class Cos:
        """Elementwise cosine."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.cos(a, name=self.result_name)

    class Atan2:
        """Elementwise two-argument arctangent."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.atan2(a, b, name=self.result_name)

    class Square:
        """Elementwise square."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.square(a, name=self.result_name)

    class SquaredDifference:
        """Elementwise (a - b) ** 2."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.squared_difference(a, b, name=self.result_name)

    class Equal:
        """Elementwise equality comparison."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.equal(a, b, name=self.result_name)

    class GreaterEqual:
        """Elementwise >= comparison."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.greater_equal(a, b, name=self.result_name)

    class Greater:
        """Elementwise > comparison."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.greater(a, b, name=self.result_name)

    class Less:
        """Elementwise < comparison."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.less(a, b, name=self.result_name)

    class LessEqual:
        """Elementwise <= comparison."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.less_equal(a, b, name=self.result_name)

    class Conv2d:
        """2D convolution (NHWC)."""

        def __init__(self, weight, strides, padding, dilations, name):
            self.weight = weight
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            return tf.nn.conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name=self.result_name,
            )

    class Conv2dRelu:
        """2D convolution followed by ReLU."""

        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.nn.relu(conv2d, name=self.result_name)

    class Conv2dRelu6:
        """2D convolution followed by ReLU6."""

        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.nn.relu6(conv2d, name=self.result_name)

    class Conv2dReluN1To1:
        """2D convolution followed by a clip to [-1, 1]."""

        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.clip_by_value(conv2d, -1.0, 1.0, name=self.result_name)

    class Conv2dTanh:
        """2D convolution followed by tanh."""

        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.math.tanh(conv2d, name=self.result_name)

    class Conv2dWithBias:
        """2D convolution followed by bias add."""

        def __init__(self, weight, bias, strides, padding, dilations, name):
            self.weight = weight
            self.bias = bias
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            conv2d_op = tf.nn.conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name="conv2d",
            )
            bias_add_op = tf.nn.bias_add(
                conv2d_op, self.bias, data_format="NHWC", name=self.result_name
            )
            return bias_add_op

    class Conv3d:
        """3D convolution (NDHWC)."""

        def __init__(self, weight, strides, padding, dilations, name):
            self.weight = weight
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            return tf.nn.conv3d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NDHWC",
                dilations=self.dilations,
                name=self.result_name,
            )

    class Conv3dWithBias:
        """3D convolution followed by bias add."""

        def __init__(self, weight, bias, strides, padding, dilations, name):
            self.weight = weight
            self.bias = bias
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            conv3d_op = tf.nn.conv3d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NDHWC",
                dilations=self.dilations,
                name="conv3d",
            )
            bias_add_op = tf.nn.bias_add(conv3d_op, self.bias, name=self.result_name)
            return bias_add_op

    class DepthwiseConv2d:
        """Depthwise 2D convolution (NHWC)."""

        def __init__(self, weight, strides, padding, dilations, name):
            self.weight = weight
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            dws_conv2d = tf.nn.depthwise_conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name="dws_conv2d",
            )
            return tf.identity(dws_conv2d, name=self.result_name)

    class DepthwiseConv2dWithBias:
        """Depthwise 2D convolution followed by bias add."""

        def __init__(self, weight, bias, strides, padding, dilations, name):
            self.weight = weight
            self.bias = bias
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            dws_conv2d = tf.nn.depthwise_conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name="dws_conv2d",
            )
            bias_add_op = tf.nn.bias_add(
                dws_conv2d, self.bias, data_format="NHWC", name=self.result_name
            )
            return bias_add_op

    class TransposeConv2d:
        """Transposed (deconvolution) 2D convolution."""

        def __init__(self, weight, output_shape, strides, padding, name):
            self.weight = weight
            self.output_shape = output_shape
            self.strides = strides
            self.padding = padding
            self.result_name = name

        def eval(self, input):
            return tf.nn.conv2d_transpose(
                input,
                self.weight,
                self.output_shape,
                self.strides,
                self.padding,
                data_format="NHWC",
                name=self.result_name,
            )

    class Argmax:
        """Index of the maximum along an axis (int32 output)."""

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            return tf.argmax(a, self.axis, output_type=tf.int32, name=self.result_name)

    class AvgPool2d:
        """2D average pooling (NHWC)."""

        def __init__(self, strides, kernel_size, padding, name):
            self.strides = strides
            self.kernel_size = kernel_size
            self.padding = padding
            self.result_name = name

        def eval(self, input):
            return tf.nn.avg_pool2d(
                input,
                strides=self.strides,
                ksize=self.kernel_size,
                padding=self.padding,
                data_format="NHWC",
                name=self.result_name,
            )

    class MaxPool2d:
        """2D max pooling (NHWC)."""

        def __init__(self, strides, kernel_size, padding, name):
            self.strides = strides
            self.kernel_size = kernel_size
            self.padding = padding
            self.result_name = name

        def eval(self, input):
            return tf.nn.max_pool2d(
                input,
                strides=self.strides,
                ksize=self.kernel_size,
                padding=self.padding,
                data_format="NHWC",
                name=self.result_name,
            )

    class Reshape:
        """Reshape to a fixed shape."""

        def __init__(self, shape, name):
            self.shape = shape
            self.result_name = name

        def eval(self, a):
            reshape_op = tf.reshape(a, self.shape)
            return tf.identity(reshape_op, name=self.result_name)

    class Transpose:
        """Transpose with a fixed permutation."""

        def __init__(self, perm, name):
            self.perm = perm
            self.result_name = name

        def eval(self, a):
            return tf.transpose(a, self.perm, name=self.result_name)

    class Slice:
        """Slice with fixed begin offsets and sizes."""

        def __init__(self, begin, size, name):
            self.begin = begin
            self.size = size
            self.result_name = name

        def eval(self, a):
            return tf.slice(a, begin=self.begin, size=self.size, name=self.result_name)

    class StridedSlice:
        """Strided slice with the full set of mask attributes."""

        def __init__(
            self,
            begin,
            end,
            strides,
            begin_mask,
            end_mask,
            ellipsis_mask,
            new_axis_mask,
            shrink_axis_mask,
            name,
        ):
            self.begin = begin
            self.end = end
            self.strides = strides
            self.begin_mask = begin_mask
            self.end_mask = end_mask
            self.ellipsis_mask = ellipsis_mask
            self.new_axis_mask = new_axis_mask
            self.shrink_axis_mask = shrink_axis_mask
            self.result_name = name

        def eval(self, a):
            return tf.strided_slice(
                a,
                begin=self.begin,
                end=self.end,
                strides=self.strides,
                begin_mask=self.begin_mask,
                end_mask=self.end_mask,
                ellipsis_mask=self.ellipsis_mask,
                new_axis_mask=self.new_axis_mask,
                shrink_axis_mask=self.shrink_axis_mask,
                name=self.result_name,
            )

    class Select:
        """Elementwise select between two tensors by a boolean selector."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, selector, a, b):
            return tf.where(condition=selector, x=a, y=b, name=self.result_name)

    class Addn:
        """N-way addition of four tensors."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b, c, d):
            return tf.add_n([a, b, c, d], name=self.result_name)

    class Concatv2:
        """Concatenate four tensors along an axis (stack for scalars)."""

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a, b, c, d):
            # Scalars cannot be concatenated, so fall back to stacking them.
            return (
                tf.concat([a, b, c, d], axis=self.axis, name=self.result_name)
                if a.shape != ()
                else tf.stack([a, b, c, d], name=self.result_name)
            )

    class Stack:
        """Stack four tensors along a new axis."""

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a, b, c, d):
            return tf.stack([a, b, c, d], axis=self.axis, name=self.result_name)

    class Unstack:
        """Unstack along an axis, then reduce each piece to a single output.

        The unstack op produces a list of tensors; since serializing a list
        of results is awkward, each piece is reduced to a scalar sum and the
        sums are stacked into one result tensor.
        """

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            unstack_op = tf.unstack(a, axis=self.axis, name="unstack_op")
            result_count = a.shape[self.axis]

            if result_count == 1:
                return tf.identity(unstack_op[0], name=self.result_name)

            sums = []
            for i in range(result_count):
                sums.append(
                    tf.math.reduce_sum(unstack_op[i], name="reduce_{}".format(i))
                )
            return tf.stack(sums, 0, name=self.result_name)

    class MirrorPad:
        """Pad with REFLECT/SYMMETRIC mirroring."""

        def __init__(self, padding, mode, name):
            self.padding = padding
            self.mode = mode
            self.result_name = name

        def eval(self, a):
            return tf.pad(
                a,
                self.padding,
                mode=self.mode,
                constant_values=0,
                name=self.result_name,
            )

    class Pad:
        """Constant pad with a configurable pad value."""

        def __init__(self, padding, pad_const, name):
            self.padding = padding
            self.pad_const = pad_const
            self.result_name = name

        def eval(self, a):
            return tf.pad(
                a,
                self.padding,
                mode="CONSTANT",
                constant_values=self.pad_const,
                name=self.result_name,
            )

    class ExpandDims:
        """Insert a dimension of size 1 at the given axis."""

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            return tf.expand_dims(a, self.axis, name=self.result_name)

    class Shape:
        """Return the shape of the input tensor."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.shape(a, name=self.result_name)

    class Rank:
        """Return the rank of the input tensor."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.rank(a, name=self.result_name)

    class Fill:
        """Create a tensor of a fixed shape filled with a constant value.

        The input tensor is accepted for harness uniformity but unused.
        """

        def __init__(self, shape, value, name):
            self.shape = shape
            self.value = value
            self.result_name = name

        def eval(self, a):
            return tf.fill(self.shape, self.value, name=self.result_name)

    class Elu:
        """Exponential Linear Unit activation."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.elu(a, name=self.result_name)

    class Softmax:
        """Softmax activation."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.softmax(a, name=self.result_name)

    class LogSoftmax:
        """Log-softmax activation."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.log_softmax(a, name=self.result_name)

    class MatMul:
        """Matrix multiplication."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.linalg.matmul(a, b, name=self.result_name)

    class AddScalar:
        """Add the scalar constant 1 to the input."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.add(a, 1, name=self.result_name)

    class Add1d:
        """Add a tensor reduced to 1D (broadcast add along the last axis)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            # Collapse b to 1D over all leading axes so the add broadcasts.
            if len(b.shape) > 1:
                b_1d = tf.reduce_sum(b, axis=list(range(0, len(b.shape) - 1, 1)))
            else:
                b_1d = b
            return tf.add(a, b_1d, name=self.result_name)

    class Split:
        """Split along an axis, then reduce each piece to a single output."""

        def __init__(self, num_splits, axis, name):
            self.num_splits = num_splits
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            # The split op generates a list of outputs. Since we have difficulty
            # serializing a list or array of Numpy arrays, we will reduce each of
            # the results

            if not isinstance(self.num_splits, list):
                split_op = tf.split(
                    a, num_or_size_splits=self.num_splits, axis=self.axis, name="split"
                )
                result_count = self.num_splits
            else:
                # A list requests uneven split sizes, passed as a const vector.
                num_split = np.asarray(self.num_splits, dtype=np.int32)
                split_vec_op = tf.compat.v1.constant(
                    num_split,
                    shape=num_split.shape,
                    dtype=tf.int32,
                    name="const_split_vec",
                )
                split_op = tf.split(
                    a, num_or_size_splits=split_vec_op, axis=self.axis, name="split"
                )
                result_count = num_split.shape[0]

            sums = []
            for i in range(result_count):
                sums.append(tf.math.reduce_sum(split_op[i], name="reduce_{}".format(i)))
            return tf.stack(sums, 0, name=self.result_name)

    class Tile:
        """Tile the input by fixed multiples."""

        def __init__(self, multiples, name):
            self.multiples = multiples
            self.result_name = name

        def eval(self, a):
            t = tf.tile(a, self.multiples, name="tile")
            return tf.identity(t, name=self.result_name)

    class Reverse:
        """Reverse the input along a single axis."""

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            return tf.reverse(a, [self.axis], name=self.result_name)

    class Gather:
        """Gather slices by fixed indices."""

        def __init__(self, indices, batch_dims, axis, name):
            self.indices = indices
            self.batch_dims = batch_dims
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            return tf.gather(
                a,
                self.indices,
                batch_dims=self.batch_dims,
                axis=self.axis,
                name=self.result_name,
            )

    class GatherNd:
        """N-dimensional gather by fixed indices."""

        def __init__(self, indices, name):
            self.indices = indices
            self.result_name = name

        def eval(self, a):
            return tf.gather_nd(a, self.indices, name=self.result_name)

    class ScatterNd:
        """Scatter randomly generated updates into a zero tensor.

        Indices and updates are generated at eval time because the update
        count depends on the indices shape and the dtype is only known from
        the input tensor.
        """

        def __init__(self, shape, indices_shape, N, rng, name):
            self.shape = shape
            self.indices_shape = indices_shape
            self.N = N
            self.rng = rng
            self.result_name = name

        def eval(self, a):

            # This operator is special. The indices and updates tensors really need
            # to be created together, but in the current structure of this tool there
            # is no way to do that before now. The number of updates is determined by
            # the indices, so we can really only create that after indices; but we
            # don't know the type at that time.
            #
            # Shapes are guaranteed deterministic, but we'll use our rng
            # copied from the arggen stage. It's possible that index and
            # update *values* will be non-deterministic.
            #
            # We take the tensor_tensor simply to get the dtype.

            shape_const = tf.constant(self.shape, tf.int32)

            updates_shape = list(self.indices_shape[:-1])
            updates_shape.extend(self.shape[self.indices_shape[-1] :])

            updates_const = tf.constant(TGen.getRand(updates_shape, a.dtype, self.rng))

            indices = np.zeros(self.indices_shape, dtype=np.int32)

            # We need to generate the random indices tensor based on the
            # limits of 'shape' for each dimension. Surely, there is a faster
            # vectorized way to do this, but the tensors are fairly small so we
            # will do this one element at a time. Each element needs to be sized based
            # on the size of the last dimension.
            for idx in np.ndindex(indices.shape):
                indices[idx] = self.rng.integers(0, self.shape[idx[-1]], size=1)[0]
                # print('{} {}'.format(idx, indices[idx]))

            indices_const = tf.constant(indices, dtype=tf.int32)

            return tf.scatter_nd(
                indices=indices_const,
                updates=updates_const,
                shape=shape_const,
                name=self.result_name,
            )

    class SpaceToBatch:
        """Space-to-batch with fixed block shape and padding."""

        def __init__(self, block_shape, padding, name):
            self.block_shape = block_shape
            self.padding = padding
            self.result_name = name

        def eval(self, a):
            return tf.space_to_batch(
                a, self.block_shape, self.padding, name=self.result_name
            )

    class BatchToSpace:
        """Batch-to-space with fixed block shape and cropping."""

        def __init__(self, block_shape, cropping, name):
            self.block_shape = block_shape
            self.cropping = cropping
            self.result_name = name

        def eval(self, a):
            # transpose to swap depth and batch first. this could avoid adding new shape
            block_rank = len(self.block_shape)
            perm = [len(a.shape) - 1]
            for i in range(block_rank):
                perm.append(i + 1)
            perm.append(0)
            transpose_op = tf.transpose(a, perm)
            return tf.batch_to_space(
                transpose_op, self.block_shape, self.cropping, name=self.result_name
            )

    class SpaceToDepth:
        """Space-to-depth with fixed block shape."""

        def __init__(self, block_shape, name):
            self.block_shape = block_shape
            self.result_name = name

        def eval(self, a):
            return tf.nn.space_to_depth(a, self.block_shape, name=self.result_name)

    class DepthToSpace:
        """Depth-to-space with fixed block shape."""

        def __init__(self, block_shape, name):
            self.block_shape = block_shape
            self.result_name = name

        def eval(self, a):
            return tf.nn.depth_to_space(a, self.block_shape, name=self.result_name)

    class OneHot:
        """One-hot encoding with fixed depth and axis."""

        def __init__(self, depth, axis, name):
            self.depth = depth
            self.axis = axis
            self.result_name = name

        def eval(self, indices, on_value, off_value):
            return tf.one_hot(
                indices,
                self.depth,
                on_value,
                off_value,
                self.axis,
                on_value.dtype,
                self.result_name,
            )

    class Fakequant:
        """Fake-quantize with fixed [-2.0, 2.0] range."""

        def __init__(self, num_bits, narrow_range, name):
            self.num_bits = num_bits
            self.narrow_range = narrow_range
            self.result_name = name

        def eval(self, a):
            return tf.quantization.fake_quant_with_min_max_args(
                a,
                min=-2.0,
                max=2.0,
                num_bits=self.num_bits,
                narrow_range=self.narrow_range,
                name=self.result_name,
            )

    class Resize:
        """Image resize, nearest-neighbor or bilinear, by an integer scale."""

        def __init__(self, mode, align, half, scale, name):
            self.result_name = name
            self.mode = mode
            self.align = align
            self.half = half
            self.scale = scale

        def eval(self, a):
            out_shape = []
            out_shape.append(a.shape[1] * self.scale)
            out_shape.append(a.shape[2] * self.scale)

            # Select the resize function by mode; compat.v1 variants expose
            # the align_corners/half_pixel_centers attributes under test.
            tf_resize_dict = (
                {"tf_resize_func": tf.compat.v1.image.resize_nearest_neighbor}
                if (self.mode == "nearest")
                else {"tf_resize_func": tf.compat.v1.image.resize_bilinear}
            )
            resize = tf_resize_dict["tf_resize_func"](
                a,
                out_shape,
                align_corners=self.align,
                name="resize",
                half_pixel_centers=self.half,
            )
            return tf.identity(resize, name=self.result_name)

    class LeftShift:
        """Bitwise left shift by a fixed amount."""

        def __init__(self, shift, name):
            self.shift = shift
            self.result_name = name

        def eval(self, a):
            return tf.bitwise.left_shift(a, self.shift, name=self.result_name)

    class RightShift:
        """Bitwise right shift by a fixed amount."""

        def __init__(self, shift, name):
            self.shift = shift
            self.result_name = name

        def eval(self, a):
            return tf.bitwise.right_shift(a, self.shift, name=self.result_name)

    class While:
        """A while loop that adds sigmoid(x) to x until the sum reaches a cap."""

        def __init__(self, name):
            self.result_name = name

        def while_cond(self, x):
            return tf.reduce_sum(x) < self.cap

        def while_body(self, x):
            return tf.add(x, tf.math.sigmoid(x))

        def eval(self, a):
            # Cap is cast to the input dtype so the comparison is well-typed.
            self.cap = tf.cast(
                tf.constant(
                    2.0,
                    shape=[
                        1,
                    ],
                ),
                a.dtype,
            )

            result = tf.while_loop(
                self.while_cond, self.while_body, [a], name=self.result_name
            )

            return result[0]

    class LSTM(tf.Module):
        """Keras LSTM layer with deterministic ('ones') initializers."""

        def __init__(self, name):
            self.result_name = name
            self.lstm = tf.keras.layers.LSTM(
                2,
                activation="tanh",
                unroll=False,
                recurrent_activation="sigmoid",
                use_bias=True,
                recurrent_initializer="ones",
                kernel_initializer="ones",
            )

        def eval(self, a):
            return self.lstm(a)

    class SLSTM(tf.Module):
        """Stateful Keras LSTM layer with deterministic initializers."""

        def __init__(self, name):
            self.result_name = name
            self.lstm = tf.keras.layers.LSTM(
                2,
                stateful=True,
                activation="tanh",
                unroll=False,
                recurrent_activation="sigmoid",
                use_bias=True,
                recurrent_initializer="ones",
                kernel_initializer="ones",
            )

        def eval(self, a):
            return self.lstm(a)

    class GRU:
        """Keras GRU layer with deterministic initializers."""

        def __init__(self, name):
            self.result_name = name
            self.lstm = tf.keras.layers.GRU(
                2,
                recurrent_activation="sigmoid",
                use_bias=True,
                recurrent_initializer="ones",
                kernel_initializer="ones",
            )

        def eval(self, a):
            return self.lstm(a)

    class RNN:
        """Simple Keras RNN with a sigmoid cell."""

        def __init__(self, name):
            self.result_name = name
            basic_cell = tf.keras.layers.SimpleRNNCell(
                units=2,
                activation="sigmoid",
                use_bias=True,
                recurrent_initializer="ones",
            )
            self.rnn = tf.keras.layers.RNN(basic_cell, unroll=False)

        def eval(self, a):
            return self.rnn(a)

    class FullyConnected:
        """Keras Dense (fully connected) layer with 2 units."""

        def __init__(self, name):
            self.result_name = name
            self.dense = tf.keras.layers.Dense(2)

        def eval(self, a):
            return self.dense(a)

    class RFFT2d:
        """2D real-valued fast Fourier transform."""

        def __init__(self, fft_length, name):
            self.fft_length = fft_length
            self.result_name = name

        def eval(self, a):
            return tf.signal.rfft2d(a, self.fft_length, name=self.result_name)

    class Real:
        """Real part of a complex tensor."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.real(a, name=self.result_name)

    class Imag:
        """Imaginary part of a complex tensor."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.imag(a, name=self.result_name)

    class BroadcastTo:
        """Broadcast the input to a fixed shape."""

        def __init__(self, shape, name):
            self.shape = shape
            self.result_name = name

        def eval(self, a):
            return tf.broadcast_to(a, shape=self.shape, name=self.result_name)

    class CallOnce(tf.Module):
        """A variable assignment wrapped in a concrete tf.function."""

        def __init__(self, name):
            # Fixed: removed stray debug print(tf.__version__) that polluted
            # stdout every time this builder was constructed.
            self.result_name = name
            self.var = tf.Variable([1.0])

        @tf.function(
            input_signature=[
                tf.TensorSpec(
                    shape=[
                        1,
                    ],
                    dtype=tf.float32,
                )
            ]
        )
        def eval(self, a):
            # The input is unused; the traced function assigns the variable.
            return self.var.assign([2.0])