blob: 8870f413e48fdc235378aa8269d8b4a78a6966df [file] [log] [blame]
Jerry Ge9e94af82022-10-27 09:57:00 -07001# Copyright (c) 2020-2023, ARM Limited.
Jeremy Johnson015c3552022-02-23 12:15:03 +00002# SPDX-License-Identifier: Apache-2.0
3import numpy as np
4import tensorflow as tf
5from frameworks.tensor_gen import TGen
6
7
class TBuilder:
    """Builders that wrap individual TensorFlow operators into tiny networks.

    Each nested class builds one operator (or a small op combination) for the
    test harness.  The common pattern is:

      * ``__init__`` captures the op's attributes plus ``name``, stored as
        ``self.result_name`` so the traced output tensor gets a known name.
      * ``eval`` takes the input tensor(s) and returns the op's result.

    Where TF would rename the output node itself (e.g. ``tf.image.resize``),
    the builder appends a ``tf.identity`` to force the output tensor name.
    """

    def __init__(self):
        pass

    # NOTE: these two helpers take the tensor as their first argument and do
    # not use instance state, so they are true static methods.  Without
    # @staticmethod an instance-bound call would silently bind the instance
    # to the ``tensor`` parameter.
    @staticmethod
    def fake_quant(tensor, tensor_scale, name):
        """Helper function for quantizing with a scaling parameters structure."""
        return tf.quantization.fake_quant_with_min_max_args(
            tensor,
            min=tensor_scale.min,
            max=tensor_scale.max,
            num_bits=tensor_scale.num_bits,
            narrow_range=tensor_scale.narrow_range,
            name=name,
        )

    @staticmethod
    def fake_quant_params(tensor, min, max, scaling, name):
        """Helper function for quantizing with individual scaling parameters."""
        return tf.quantization.fake_quant_with_min_max_args(
            tensor,
            min=min,
            max=max,
            num_bits=scaling.num_bits,
            narrow_range=scaling.narrow_range,
            name=name,
        )

    class Add:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.add(a, b, name=self.result_name)

    class Sub:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.subtract(a, b, name=self.result_name)

    class Mul:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.multiply(a, b, name=self.result_name)

    class Exp:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.exp(a, name=self.result_name)

    class Rcp:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.reciprocal(a, name=self.result_name)

    class Relu:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.relu(a, name=self.result_name)

    class Relu1:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            # TF doesn't have relu_n1_to_1 operator,
            # use min and max as a workaround
            # alternatively, we can use clip_by_value
            return tf.math.minimum(1.0, tf.math.maximum(-1.0, a))

    class Relu6:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.relu6(a, name=self.result_name)

    class LeakyRelu:
        def __init__(self, alpha, name):
            self.alpha = alpha
            self.result_name = name

        def eval(self, a):
            return tf.nn.leaky_relu(a, alpha=self.alpha, name=self.result_name)

    class Prelu:
        def __init__(self, name):
            self.result_name = name
            # Randomly-initialized slope so the learned-alpha path is exercised.
            self.prelu = tf.keras.layers.PReLU(
                alpha_initializer=tf.keras.initializers.RandomNormal(
                    mean=0.0, stddev=1.0
                )
            )

        def eval(self, a):
            return self.prelu(a)

    class Gelu:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.gelu(a, name=self.result_name)

    class Concat:
        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a, b):
            return tf.concat([a, b], self.axis, name=self.result_name)

    class BitwiseAnd:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.bitwise.bitwise_and(a, b, name=self.result_name)

    class BitwiseOr:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.bitwise.bitwise_or(a, b, name=self.result_name)

    class BitwiseNot:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.bitwise.invert(a, name=self.result_name)

    class BitwiseXor:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.bitwise.bitwise_xor(a, b, name=self.result_name)

    class LogicalAnd:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.logical_and(a, b, name=self.result_name)

    class LogicalOr:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.logical_or(a, b, name=self.result_name)

    class LogicalNot:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.logical_not(a, name=self.result_name)

    class ReduceAny:
        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_any(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceAll:
        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_all(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceMin:
        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_min(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceMax:
        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_max(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceSum:
        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_sum(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceMean:
        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_mean(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceProduct:
        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_prod(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class Min:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.minimum(a, b, name=self.result_name)

    class Max:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.maximum(a, b, name=self.result_name)

    class Pow:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.pow(a, b, name=self.result_name)

    class Abs:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.abs(a, name=self.result_name)

    class Ceil:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.ceil(a, name=self.result_name)

    class Floor:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.floor(a, name=self.result_name)

    class Log:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.log(a, name=self.result_name)

    class Negate:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.negative(a, name=self.result_name)

    class Rsqrt:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.rsqrt(a, name=self.result_name)

    class Sigmoid:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.sigmoid(a, name=self.result_name)

    class Tanh:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.tanh(a, name=self.result_name)

    class Sin:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.sin(a, name=self.result_name)

    class Cos:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.cos(a, name=self.result_name)

    class Square:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.square(a, name=self.result_name)

    class SquaredDifference:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.squared_difference(a, b, name=self.result_name)

    class Equal:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.equal(a, b, name=self.result_name)

    class GreaterEqual:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.greater_equal(a, b, name=self.result_name)

    class Greater:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.greater(a, b, name=self.result_name)

    class Less:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.less(a, b, name=self.result_name)

    class LessEqual:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.less_equal(a, b, name=self.result_name)

    class Conv2d:
        def __init__(self, weight, strides, padding, dilations, name):
            self.weight = weight
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            return tf.nn.conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name=self.result_name,
            )

    class Conv2dRelu:
        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.nn.relu(conv2d, name=self.result_name)

    class Conv2dRelu6:
        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.nn.relu6(conv2d, name=self.result_name)

    class Conv2dReluN1To1:
        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.clip_by_value(conv2d, -1.0, 1.0, name=self.result_name)

    class Conv2dTanh:
        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.math.tanh(conv2d, name=self.result_name)

    class Conv2dWithBias:
        def __init__(self, weight, bias, strides, padding, dilations, name):
            self.weight = weight
            self.bias = bias
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            conv2d_op = tf.nn.conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name="conv2d",
            )
            bias_add_op = tf.nn.bias_add(
                conv2d_op, self.bias, data_format="NHWC", name=self.result_name
            )
            return bias_add_op

    class Conv3d:
        def __init__(self, weight, strides, padding, dilations, name):
            self.weight = weight
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            return tf.nn.conv3d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NDHWC",
                dilations=self.dilations,
                name=self.result_name,
            )

    class Conv3dWithBias:
        def __init__(self, weight, bias, strides, padding, dilations, name):
            self.weight = weight
            self.bias = bias
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            conv3d_op = tf.nn.conv3d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NDHWC",
                dilations=self.dilations,
                name="conv3d",
            )
            bias_add_op = tf.nn.bias_add(conv3d_op, self.bias, name=self.result_name)
            return bias_add_op

    class DepthwiseConv2d:
        def __init__(self, weight, strides, padding, dilations, name):
            self.weight = weight
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            dws_conv2d = tf.nn.depthwise_conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name="dws_conv2d",
            )
            # identity forces the output tensor to carry result_name
            return tf.identity(dws_conv2d, name=self.result_name)

    class DepthwiseConv2dWithBias:
        def __init__(self, weight, bias, strides, padding, dilations, name):
            self.weight = weight
            self.bias = bias
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            dws_conv2d = tf.nn.depthwise_conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name="dws_conv2d",
            )
            bias_add_op = tf.nn.bias_add(
                dws_conv2d, self.bias, data_format="NHWC", name=self.result_name
            )
            return bias_add_op

    class TransposeConv2d:
        def __init__(self, weight, output_shape, strides, padding, name):
            self.weight = weight
            self.output_shape = output_shape
            self.strides = strides
            self.padding = padding
            self.result_name = name

        def eval(self, input):
            return tf.nn.conv2d_transpose(
                input,
                self.weight,
                self.output_shape,
                self.strides,
                self.padding,
                data_format="NHWC",
                name=self.result_name,
            )

    class Argmax:
        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            return tf.argmax(a, self.axis, output_type=tf.int32, name=self.result_name)

    class AvgPool2d:
        def __init__(self, strides, kernel_size, padding, name):
            self.strides = strides
            self.kernel_size = kernel_size
            self.padding = padding
            self.result_name = name

        def eval(self, input):
            return tf.nn.avg_pool2d(
                input,
                strides=self.strides,
                ksize=self.kernel_size,
                padding=self.padding,
                data_format="NHWC",
                name=self.result_name,
            )

    class MaxPool2d:
        def __init__(self, strides, kernel_size, padding, name):
            self.strides = strides
            self.kernel_size = kernel_size
            self.padding = padding
            self.result_name = name

        def eval(self, input):
            return tf.nn.max_pool2d(
                input,
                strides=self.strides,
                ksize=self.kernel_size,
                padding=self.padding,
                data_format="NHWC",
                name=self.result_name,
            )

    class Reshape:
        def __init__(self, shape, name):
            self.shape = shape
            self.result_name = name

        def eval(self, a):
            reshape_op = tf.reshape(a, self.shape)
            return tf.identity(reshape_op, name=self.result_name)

    class Transpose:
        def __init__(self, perm, name):
            self.perm = perm
            self.result_name = name

        def eval(self, a):
            return tf.transpose(a, self.perm, name=self.result_name)

    class Slice:
        def __init__(self, begin, size, name):
            self.begin = begin
            self.size = size
            self.result_name = name

        def eval(self, a):
            return tf.slice(a, begin=self.begin, size=self.size, name=self.result_name)

    class StridedSlice:
        def __init__(
            self,
            begin,
            end,
            strides,
            begin_mask,
            end_mask,
            ellipsis_mask,
            new_axis_mask,
            shrink_axis_mask,
            name,
        ):
            self.begin = begin
            self.end = end
            self.strides = strides
            self.begin_mask = begin_mask
            self.end_mask = end_mask
            self.ellipsis_mask = ellipsis_mask
            self.new_axis_mask = new_axis_mask
            self.shrink_axis_mask = shrink_axis_mask
            self.result_name = name

        def eval(self, a):
            return tf.strided_slice(
                a,
                begin=self.begin,
                end=self.end,
                strides=self.strides,
                begin_mask=self.begin_mask,
                end_mask=self.end_mask,
                ellipsis_mask=self.ellipsis_mask,
                new_axis_mask=self.new_axis_mask,
                shrink_axis_mask=self.shrink_axis_mask,
                name=self.result_name,
            )

    class Select:
        def __init__(self, name):
            self.result_name = name

        def eval(self, selector, a, b):
            return tf.where(condition=selector, x=a, y=b, name=self.result_name)

    class Addn:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b, c, d):
            return tf.add_n([a, b, c, d], name=self.result_name)

    class Concatv2:
        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a, b, c, d):
            return tf.concat([a, b, c, d], axis=self.axis, name=self.result_name)

    class Stack:
        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a, b, c, d):
            return tf.stack([a, b, c, d], axis=self.axis, name=self.result_name)

    class Unstack:
        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            # Unstack yields a list of tensors; since a list of outputs is
            # hard to serialize, reduce each slice to a scalar and restack
            # them into a single result tensor.
            unstack_op = tf.unstack(a, axis=self.axis, name="unstack_op")
            result_count = a.shape[self.axis]

            if result_count == 1:
                return tf.identity(unstack_op[0], name=self.result_name)

            sums = []
            for i in range(result_count):
                sums.append(
                    tf.math.reduce_sum(unstack_op[i], name="reduce_{}".format(i))
                )
            return tf.stack(sums, 0, name=self.result_name)

    class MirrorPad:
        def __init__(self, padding, mode, name):
            self.padding = padding
            self.mode = mode
            self.result_name = name

        def eval(self, a):
            return tf.pad(
                a,
                self.padding,
                mode=self.mode,
                constant_values=0,
                name=self.result_name,
            )

    class Pad:
        def __init__(self, padding, name):
            self.padding = padding
            self.result_name = name

        def eval(self, a):
            return tf.pad(
                a,
                self.padding,
                mode="CONSTANT",
                constant_values=0,
                name=self.result_name,
            )

    class ExpandDims:
        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            return tf.expand_dims(a, self.axis, name=self.result_name)

    class Shape:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.shape(a, name=self.result_name)

    class Rank:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.rank(a, name=self.result_name)

    class Fill:
        def __init__(self, shape, value, name):
            self.shape = shape
            self.value = value
            self.result_name = name

        def eval(self, a):
            # input 'a' is ignored; fill is driven entirely by the attributes
            return tf.fill(self.shape, self.value, name=self.result_name)

    class Elu:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.elu(a, name=self.result_name)

    class Softmax:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.softmax(a, name=self.result_name)

    class LogSoftmax:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.log_softmax(a, name=self.result_name)

    class MatMul:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.linalg.matmul(a, b, name=self.result_name)

    class AddScalar:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.add(a, 1, name=self.result_name)

    class Add1d:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            # collapse b to rank 1 (reduce over all leading axes) so the add
            # exercises 1-D broadcasting
            if len(b.shape) > 1:
                b_1d = tf.reduce_sum(b, axis=list(range(0, len(b.shape) - 1, 1)))
            else:
                b_1d = b
            return tf.add(a, b_1d, name=self.result_name)

    class Split:
        def __init__(self, num_splits, axis, name):
            self.num_splits = num_splits
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            # The split op generates a list of outputs. Since we have difficulty
            # serializing a list or array of Numpy arrays, we will reduce each of
            # the results

            if not isinstance(self.num_splits, list):
                split_op = tf.split(
                    a, num_or_size_splits=self.num_splits, axis=self.axis, name="split"
                )
                result_count = self.num_splits
            else:
                # list case: feed the sizes through a constant tensor (SplitV)
                num_split = np.asarray(self.num_splits, dtype=np.int32)
                split_vec_op = tf.compat.v1.constant(
                    num_split,
                    shape=num_split.shape,
                    dtype=tf.int32,
                    name="const_split_vec",
                )
                split_op = tf.split(
                    a, num_or_size_splits=split_vec_op, axis=self.axis, name="split"
                )
                result_count = num_split.shape[0]

            sums = []
            for i in range(result_count):
                sums.append(tf.math.reduce_sum(split_op[i], name="reduce_{}".format(i)))
            return tf.stack(sums, 0, name=self.result_name)

    class Tile:
        def __init__(self, multiples, name):
            self.multiples = multiples
            self.result_name = name

        def eval(self, a):
            t = tf.tile(a, self.multiples, name="tile")
            return tf.identity(t, name=self.result_name)

    class Reverse:
        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            return tf.reverse(a, [self.axis], name=self.result_name)

    class Gather:
        def __init__(self, indices, batch_dims, axis, name):
            self.indices = indices
            self.batch_dims = batch_dims
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            return tf.gather(
                a,
                self.indices,
                batch_dims=self.batch_dims,
                axis=self.axis,
                name=self.result_name,
            )

    class GatherNd:
        def __init__(self, indices, name):
            self.indices = indices
            self.result_name = name

        def eval(self, a):
            return tf.gather_nd(a, self.indices, name=self.result_name)

    class ScatterNd:
        def __init__(self, shape, indices_shape, N, rng, name):
            self.shape = shape
            self.indices_shape = indices_shape
            self.N = N
            self.rng = rng
            self.result_name = name

        def eval(self, a):

            # This operator is special. The indices and updates tensors really need
            # to be created together, but in the current structure of this tool there
            # is no way to do that before now. The number of updates is determined by
            # the indices, so we can really only create that after indices; but we
            # don't know the type at that time.
            #
            # Shapes are guaranteed deterministic, but we'll use our rng
            # copied from the arggen stage. It's possible that index and
            # update *values* will be non-deterministic.
            #
            # We take the tensor_tensor simply to get the dtype.

            shape_const = tf.constant(self.shape, tf.int32)

            updates_shape = list(self.indices_shape[:-1])
            updates_shape.extend(self.shape[self.indices_shape[-1] :])

            updates_const = tf.constant(TGen.getRand(updates_shape, a.dtype, self.rng))

            indices = np.zeros(self.indices_shape, dtype=np.int32)

            # We need to generate the random indices tensor based on the
            # limits of 'shape' for each dimension. Surely, there is a faster
            # vectorized way to do this, but the tensors are fairly small so we
            # will do this one element at a time. Each element needs to be sized based
            # on the size of the last dimension.
            for idx in np.ndindex(indices.shape):
                indices[idx] = self.rng.integers(0, self.shape[idx[-1]], size=1)[0]
                # print('{} {}'.format(idx, indices[idx]))

            indices_const = tf.constant(indices, dtype=tf.int32)

            return tf.scatter_nd(
                indices=indices_const,
                updates=updates_const,
                shape=shape_const,
                name=self.result_name,
            )

    class SpaceToBatch:
        def __init__(self, block_shape, padding, name):
            self.block_shape = block_shape
            self.padding = padding
            self.result_name = name

        def eval(self, a):
            return tf.space_to_batch(
                a, self.block_shape, self.padding, name=self.result_name
            )

    class BatchToSpace:
        def __init__(self, block_shape, cropping, name):
            self.block_shape = block_shape
            self.cropping = cropping
            self.result_name = name

        def eval(self, a):
            # transpose to swap depth and batch first. this could avoid adding new shape
            block_rank = len(self.block_shape)
            perm = [len(a.shape) - 1]
            for i in range(block_rank):
                perm.append(i + 1)
            perm.append(0)
            transpose_op = tf.transpose(a, perm)
            return tf.batch_to_space(
                transpose_op, self.block_shape, self.cropping, name=self.result_name
            )

    class SpaceToDepth:
        def __init__(self, block_shape, name):
            self.block_shape = block_shape
            self.result_name = name

        def eval(self, a):
            return tf.nn.space_to_depth(a, self.block_shape, name=self.result_name)

    class DepthToSpace:
        def __init__(self, block_shape, name):
            self.block_shape = block_shape
            self.result_name = name

        def eval(self, a):
            return tf.nn.depth_to_space(a, self.block_shape, name=self.result_name)

    class OneHot:
        def __init__(self, depth, axis, name):
            self.depth = depth
            self.axis = axis
            self.result_name = name

        def eval(self, indices, on_value, off_value):
            return tf.one_hot(
                indices,
                self.depth,
                on_value,
                off_value,
                self.axis,
                on_value.dtype,
                self.result_name,
            )

    class Fakequant:
        def __init__(self, num_bits, narrow_range, name):
            self.num_bits = num_bits
            self.narrow_range = narrow_range
            self.result_name = name

        def eval(self, a):
            return tf.quantization.fake_quant_with_min_max_args(
                a,
                min=-2.0,
                max=2.0,
                num_bits=self.num_bits,
                narrow_range=self.narrow_range,
                name=self.result_name,
            )

    class ResizeNearest:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            out_shape = []
            out_shape.append(a.shape[1] * 2)
            out_shape.append(a.shape[2] * 2)

            # tf.image.resize() will overwrite the node name with result_name +
            # '/BILINEAR' need to add extra identity to force output tensor name to
            # result_name return tf.image.resize(a, out_shape,
            # method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, name=result_name)
            resize = tf.image.resize(
                a,
                out_shape,
                method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
                name="resize",
            )
            return tf.identity(resize, name=self.result_name)

    class ResizeBilinear:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            out_shape = []
            out_shape.append(a.shape[1] * 2)
            out_shape.append(a.shape[2] * 2)

            # tf.image.resize() will overwrite the node name with result_name +
            # '/BILINEAR' need to add extra identity to force output tensor name to
            # result_name return tf.image.resize(a, out_shape,
            # method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, name=result_name)
            resize = tf.image.resize(
                a, out_shape, method=tf.image.ResizeMethod.BILINEAR, name="resize"
            )
            return tf.identity(resize, name=self.result_name)

    # New tf resize set (align_corners, half_pixel_centers) = (false, true) by default.
    # Test the rest option combinations here.
    # Note that (align_corners, half_pixel_centers) = (true, true) is NOT valid.
    class ResizeBilinearV1AlignCorners:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            out_shape = []
            out_shape.append(a.shape[1] * 2)
            out_shape.append(a.shape[2] * 2)

            resize = tf.compat.v1.image.resize_bilinear(
                a,
                out_shape,
                align_corners=True,
                name="resize",
                half_pixel_centers=False,
            )
            return tf.identity(resize, name=self.result_name)

    class ResizeBilinearV1None:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            out_shape = []
            out_shape.append(a.shape[1] * 2)
            out_shape.append(a.shape[2] * 2)

            resize = tf.compat.v1.image.resize_bilinear(
                a,
                out_shape,
                align_corners=False,
                name="resize",
                half_pixel_centers=False,
            )
            return tf.identity(resize, name=self.result_name)

    class LeftShift:
        def __init__(self, shift, name):
            self.shift = shift
            self.result_name = name

        def eval(self, a):
            return tf.bitwise.left_shift(a, self.shift, name=self.result_name)

    class RightShift:
        def __init__(self, shift, name):
            self.shift = shift
            self.result_name = name

        def eval(self, a):
            return tf.bitwise.right_shift(a, self.shift, name=self.result_name)

    class While:
        def __init__(self, name):
            self.result_name = name

        def while_cond(self, x):
            # loop until the running sum reaches the cap set up in eval()
            return tf.reduce_sum(x) < self.cap

        def while_body(self, x):
            return tf.add(x, tf.math.sigmoid(x))

        def eval(self, a):
            # cap is created here (not __init__) so it matches the input dtype
            self.cap = tf.cast(
                tf.constant(
                    2.0,
                    shape=[
                        1,
                    ],
                ),
                a.dtype,
            )

            result = tf.while_loop(
                self.while_cond, self.while_body, [a], name=self.result_name
            )

            return result[0]

    class LSTM:
        def __init__(self, name):
            self.result_name = name
            self.lstm = tf.keras.layers.LSTM(
                2,
                activation="tanh",
                unroll=False,
                recurrent_activation="sigmoid",
                use_bias=True,
                recurrent_initializer="ones",
                kernel_initializer="ones",
            )

        def eval(self, a):
            return self.lstm(a)

    class GRU:
        def __init__(self, name):
            self.result_name = name
            self.lstm = tf.keras.layers.GRU(
                2,
                recurrent_activation="sigmoid",
                use_bias=True,
                recurrent_initializer="ones",
                kernel_initializer="ones",
            )

        def eval(self, a):
            return self.lstm(a)

    class RNN:
        def __init__(self, name):
            self.result_name = name
            basic_cell = tf.keras.layers.SimpleRNNCell(
                units=2,
                activation="sigmoid",
                use_bias=True,
                recurrent_initializer="ones",
            )
            self.rnn = tf.keras.layers.RNN(basic_cell, unroll=False)

        def eval(self, a):
            return self.rnn(a)

    class FullyConnected:
        def __init__(self, name):
            self.result_name = name
            self.dense = tf.keras.layers.Dense(2)

        def eval(self, a):
            return self.dense(a)