blob: fcd72a36ceb3cc58515338db958cc9060307e068 [file] [log] [blame]
Jerry Ge9e94af82022-10-27 09:57:00 -07001# Copyright (c) 2020-2023, ARM Limited.
Jeremy Johnson015c3552022-02-23 12:15:03 +00002# SPDX-License-Identifier: Apache-2.0
3import numpy as np
4import tensorflow as tf
5from frameworks.tensor_gen import TGen
6
7
class TBuilder:
    """The member functions build the tensorflow operators into small networks
    for our tests"""

    def __init__(self):
        pass

    # These helpers were previously plain functions in the class body (no
    # `self`, no decorator): accessing them through the class worked, but
    # calling them on a TBuilder *instance* would mis-bind the instance as
    # `tensor`.  @staticmethod keeps class-access calls working and fixes
    # instance-access calls.
    @staticmethod
    def fake_quant(tensor, tensor_scale, name):
        """Helper function for quantizing with a scaling parameters structure."""
        return tf.quantization.fake_quant_with_min_max_args(
            tensor,
            min=tensor_scale.min,
            max=tensor_scale.max,
            num_bits=tensor_scale.num_bits,
            narrow_range=tensor_scale.narrow_range,
            name=name,
        )

    @staticmethod
    def fake_quant_params(tensor, min, max, scaling, name):
        """Helper function for quantizing with individual scaling parameters.

        NOTE: `min`/`max` shadow the builtins; names kept for keyword-call
        compatibility with existing callers.
        """
        return tf.quantization.fake_quant_with_min_max_args(
            tensor,
            min=min,
            max=max,
            num_bits=scaling.num_bits,
            narrow_range=scaling.narrow_range,
            name=name,
        )
36
    # Simple element-wise builders.  Each stores the desired result tensor
    # name and, in eval(), wraps exactly one TF op with name=result_name.
    class Add:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.add(a, b, name=self.result_name)

    class Sub:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.subtract(a, b, name=self.result_name)

    class Mul:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.multiply(a, b, name=self.result_name)

    class Exp:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.exp(a, name=self.result_name)

    class Rcp:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.reciprocal(a, name=self.result_name)

    class Relu:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.relu(a, name=self.result_name)
78
Jerry Ge93912432022-07-22 10:29:13 -070079 class Relu1:
80 def __init__(self, name):
81 self.result_name = name
82
83 def eval(self, a):
84 # TF doesn't have relu_n1_to_1 operator,
85 # use min and max as a workaround
86 # alternatively, we can use clip_by_value
87 return tf.math.minimum(1.0, tf.math.maximum(-1.0, a))
88
Jerry Ge2eea5bf2022-10-11 16:27:05 +000089 class Relu0To1:
90 def __init__(self, name):
91 self.result_name = name
92
93 def eval(self, a):
94 # TF doesn't have relu_0_to_1 operator,
95 # use min and max as a workaround
96 # alternatively, we can use clip_by_value
97 return tf.math.minimum(1.0, tf.math.maximum(0.0, a))
98
    class Relu6:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.relu6(a, name=self.result_name)

    class LeakyRelu:
        def __init__(self, alpha, name):
            # slope for negative inputs, forwarded to tf.nn.leaky_relu
            self.alpha = alpha
            self.result_name = name

        def eval(self, a):
            return tf.nn.leaky_relu(a, alpha=self.alpha, name=self.result_name)

    class Prelu:
        def __init__(self, name):
            # NOTE(review): result_name is stored but never applied to the
            # Keras layer output — the result tensor keeps the layer's own
            # name.  Presumably acceptable for these tests; confirm.
            self.result_name = name
            self.prelu = tf.keras.layers.PReLU(
                alpha_initializer=tf.keras.initializers.RandomNormal(
                    mean=0.0, stddev=1.0
                )
            )

        def eval(self, a):
            return self.prelu(a)

    class Gelu:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.gelu(a, name=self.result_name)
132
    class Concat:
        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a, b):
            return tf.concat([a, b], self.axis, name=self.result_name)

    # Bitwise builders: inputs must be integer tensors.
    class BitwiseAnd:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.bitwise.bitwise_and(a, b, name=self.result_name)

    class BitwiseOr:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.bitwise.bitwise_or(a, b, name=self.result_name)

    class BitwiseNot:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.bitwise.invert(a, name=self.result_name)

    class BitwiseXor:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.bitwise.bitwise_xor(a, b, name=self.result_name)

    # Logical builders: inputs must be boolean tensors.
    class LogicalAnd:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.logical_and(a, b, name=self.result_name)

    class LogicalOr:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.logical_or(a, b, name=self.result_name)

    class LogicalNot:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.logical_not(a, name=self.result_name)

    # Reduction builders: reduce over axis_list, optionally keeping the
    # reduced dimensions (keepdims).
    class ReduceAny:
        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_any(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceAll:
        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_all(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceMin:
        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_min(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceMax:
        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_max(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceSum:
        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_sum(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceMean:
        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_mean(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceProduct:
        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_prod(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )
266
    # More single-op element-wise and comparison builders; the comparison
    # ops (Equal .. LessEqual) return boolean tensors.
    class Min:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.minimum(a, b, name=self.result_name)

    class Max:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.maximum(a, b, name=self.result_name)

    class Pow:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.pow(a, b, name=self.result_name)

    class Abs:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.abs(a, name=self.result_name)

    class Ceil:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.ceil(a, name=self.result_name)

    class Floor:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.floor(a, name=self.result_name)

    class Log:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.log(a, name=self.result_name)

    class Negate:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.negative(a, name=self.result_name)

    class Rsqrt:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.rsqrt(a, name=self.result_name)

    class Sign:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.sign(a, name=self.result_name)

    class Sigmoid:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.sigmoid(a, name=self.result_name)

    class Tanh:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.tanh(a, name=self.result_name)

    class Erf:
        # tfl.ops cannot be generated right now.
        # https://github.com/tensorflow/tensorflow/issues/60809
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.erf(a, name=self.result_name)

    class Sin:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.sin(a, name=self.result_name)

    class Cos:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.cos(a, name=self.result_name)

    class Atan2:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.atan2(a, b, name=self.result_name)

    class Square:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.square(a, name=self.result_name)

    class SquaredDifference:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.squared_difference(a, b, name=self.result_name)

    class Equal:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.equal(a, b, name=self.result_name)

    class GreaterEqual:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.greater_equal(a, b, name=self.result_name)

    class Greater:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.greater(a, b, name=self.result_name)

    class Less:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.less(a, b, name=self.result_name)

    class LessEqual:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.less_equal(a, b, name=self.result_name)
429
    # 2D convolution builders (all NHWC).  The fused variants
    # (Conv2dRelu/Relu6/ReluN1To1/Tanh) hard-code stride 1, SAME padding
    # and dilation 1, and apply the activation to the conv output.
    class Conv2d:
        def __init__(self, weight, strides, padding, dilations, name):
            self.weight = weight
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            return tf.nn.conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name=self.result_name,
            )

    class Conv2dRelu:
        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.nn.relu(conv2d, name=self.result_name)

    class Conv2dRelu6:
        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.nn.relu6(conv2d, name=self.result_name)

    class Conv2dReluN1To1:
        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            # clamp to [-1, 1] (relu_n1_to_1 equivalent)
            return tf.clip_by_value(conv2d, -1.0, 1.0, name=self.result_name)

    class Conv2dTanh:
        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.math.tanh(conv2d, name=self.result_name)

    class Conv2dWithBias:
        def __init__(self, weight, bias, strides, padding, dilations, name):
            self.weight = weight
            self.bias = bias
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            conv2d_op = tf.nn.conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name="conv2d",
            )
            bias_add_op = tf.nn.bias_add(
                conv2d_op, self.bias, data_format="NHWC", name=self.result_name
            )
            return bias_add_op
540
    # 3D (NDHWC) and depthwise (NHWC) convolution builders.
    class Conv3d:
        def __init__(self, weight, strides, padding, dilations, name):
            self.weight = weight
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            return tf.nn.conv3d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NDHWC",
                dilations=self.dilations,
                name=self.result_name,
            )

    class Conv3dWithBias:
        def __init__(self, weight, bias, strides, padding, dilations, name):
            self.weight = weight
            self.bias = bias
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            conv3d_op = tf.nn.conv3d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NDHWC",
                dilations=self.dilations,
                name="conv3d",
            )
            # NOTE(review): unlike Conv2dWithBias, no data_format is passed
            # to bias_add here (channels-last default) — confirm intentional.
            bias_add_op = tf.nn.bias_add(conv3d_op, self.bias, name=self.result_name)
            return bias_add_op

    class DepthwiseConv2d:
        def __init__(self, weight, strides, padding, dilations, name):
            self.weight = weight
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            dws_conv2d = tf.nn.depthwise_conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name="dws_conv2d",
            )
            # identity gives the result tensor the expected name
            return tf.identity(dws_conv2d, name=self.result_name)

    class DepthwiseConv2dWithBias:
        def __init__(self, weight, bias, strides, padding, dilations, name):
            self.weight = weight
            self.bias = bias
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            dws_conv2d = tf.nn.depthwise_conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name="dws_conv2d",
            )
            bias_add_op = tf.nn.bias_add(
                dws_conv2d, self.bias, data_format="NHWC", name=self.result_name
            )
            return bias_add_op
625
    class TransposeConv2d:
        def __init__(self, weight, output_shape, strides, padding, name):
            self.weight = weight
            # conv2d_transpose requires the output shape explicitly
            self.output_shape = output_shape
            self.strides = strides
            self.padding = padding
            self.result_name = name

        def eval(self, input):
            return tf.nn.conv2d_transpose(
                input,
                self.weight,
                self.output_shape,
                self.strides,
                self.padding,
                data_format="NHWC",
                name=self.result_name,
            )

    class Argmax:
        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            # int32 output to match the TOSA ARGMAX result type
            return tf.argmax(a, self.axis, output_type=tf.int32, name=self.result_name)

    class AvgPool2d:
        def __init__(self, strides, kernel_size, padding, name):
            self.strides = strides
            self.kernel_size = kernel_size
            self.padding = padding
            self.result_name = name

        def eval(self, input):
            return tf.nn.avg_pool2d(
                input,
                strides=self.strides,
                ksize=self.kernel_size,
                padding=self.padding,
                data_format="NHWC",
                name=self.result_name,
            )

    class MaxPool2d:
        def __init__(self, strides, kernel_size, padding, name):
            self.strides = strides
            self.kernel_size = kernel_size
            self.padding = padding
            self.result_name = name

        def eval(self, input):
            return tf.nn.max_pool2d(
                input,
                strides=self.strides,
                ksize=self.kernel_size,
                padding=self.padding,
                data_format="NHWC",
                name=self.result_name,
            )
686
    class Reshape:
        def __init__(self, shape, name):
            self.shape = shape
            self.result_name = name

        def eval(self, a):
            reshape_op = tf.reshape(a, self.shape)
            # identity gives the result tensor the expected name
            return tf.identity(reshape_op, name=self.result_name)

    class Transpose:
        def __init__(self, perm, name):
            self.perm = perm
            self.result_name = name

        def eval(self, a):
            return tf.transpose(a, self.perm, name=self.result_name)

    class Slice:
        def __init__(self, begin, size, name):
            self.begin = begin
            self.size = size
            self.result_name = name

        def eval(self, a):
            return tf.slice(a, begin=self.begin, size=self.size, name=self.result_name)

    class StridedSlice:
        # All mask arguments are the integer bitmasks defined by
        # tf.strided_slice and are forwarded unchanged.
        def __init__(
            self,
            begin,
            end,
            strides,
            begin_mask,
            end_mask,
            ellipsis_mask,
            new_axis_mask,
            shrink_axis_mask,
            name,
        ):
            self.begin = begin
            self.end = end
            self.strides = strides
            self.begin_mask = begin_mask
            self.end_mask = end_mask
            self.ellipsis_mask = ellipsis_mask
            self.new_axis_mask = new_axis_mask
            self.shrink_axis_mask = shrink_axis_mask
            self.result_name = name

        def eval(self, a):
            return tf.strided_slice(
                a,
                begin=self.begin,
                end=self.end,
                strides=self.strides,
                begin_mask=self.begin_mask,
                end_mask=self.end_mask,
                ellipsis_mask=self.ellipsis_mask,
                new_axis_mask=self.new_axis_mask,
                shrink_axis_mask=self.shrink_axis_mask,
                name=self.result_name,
            )

    class Select:
        def __init__(self, name):
            self.result_name = name

        def eval(self, selector, a, b):
            # elementwise: a where selector is True, else b
            return tf.where(condition=selector, x=a, y=b, name=self.result_name)
756
    class Addn:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b, c, d):
            return tf.add_n([a, b, c, d], name=self.result_name)

    class Concatv2:
        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a, b, c, d):
            return tf.concat([a, b, c, d], axis=self.axis, name=self.result_name)

    class Stack:
        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a, b, c, d):
            return tf.stack([a, b, c, d], axis=self.axis, name=self.result_name)

    class Unstack:
        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            # unstack produces a list of tensors; reduce each slice to a
            # scalar and re-stack so a single result tensor is returned.
            unstack_op = tf.unstack(a, axis=self.axis, name="unstack_op")
            result_count = a.shape[self.axis]

            if result_count == 1:
                return tf.identity(unstack_op[0], name=self.result_name)

            sums = []
            for i in range(result_count):
                sums.append(
                    tf.math.reduce_sum(unstack_op[i], name="reduce_{}".format(i))
                )
            return tf.stack(sums, 0, name=self.result_name)
798
    class MirrorPad:
        def __init__(self, padding, mode, name):
            self.padding = padding
            # "REFLECT" or "SYMMETRIC" (tf.pad modes)
            self.mode = mode
            self.result_name = name

        def eval(self, a):
            return tf.pad(
                a,
                self.padding,
                mode=self.mode,
                constant_values=0,
                name=self.result_name,
            )

    class Pad:
        def __init__(self, padding, pad_const, name):
            self.padding = padding
            # value used to fill the padded region
            self.pad_const = pad_const
            self.result_name = name

        def eval(self, a):
            return tf.pad(
                a,
                self.padding,
                mode="CONSTANT",
                constant_values=self.pad_const,
                name=self.result_name,
            )

    class ExpandDims:
        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            return tf.expand_dims(a, self.axis, name=self.result_name)

    class Shape:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.shape(a, name=self.result_name)

    class Rank:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.rank(a, name=self.result_name)

    class Fill:
        def __init__(self, shape, value, name):
            self.shape = shape
            self.value = value
            self.result_name = name

        def eval(self, a):
            # `a` is intentionally unused; the fill shape/value are fixed at
            # construction time (input presumably kept for a uniform
            # eval() interface across builders).
            return tf.fill(self.shape, self.value, name=self.result_name)
859
    class Elu:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.elu(a, name=self.result_name)

    class Softmax:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.softmax(a, name=self.result_name)

    class LogSoftmax:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.log_softmax(a, name=self.result_name)

    class MatMul:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.linalg.matmul(a, b, name=self.result_name)

    class AddScalar:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            # exercises tensor-plus-scalar broadcasting
            return tf.add(a, 1, name=self.result_name)

    class Add1d:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            # collapse b to 1-D (reduce all but the last axis) so the add
            # exercises 1-D broadcasting against a
            if len(b.shape) > 1:
                b_1d = tf.reduce_sum(b, axis=list(range(0, len(b.shape) - 1, 1)))
            else:
                b_1d = b
            return tf.add(a, b_1d, name=self.result_name)

    class Split:
        def __init__(self, num_splits, axis, name):
            # int (equal split count) or list of section sizes
            self.num_splits = num_splits
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            # The split op generates a list of outputs. Since we have difficulty
            # serializing a list or array of Numpy arrays, we will reduce each of
            # the results

            if not isinstance(self.num_splits, list):
                split_op = tf.split(
                    a, num_or_size_splits=self.num_splits, axis=self.axis, name="split"
                )
                result_count = self.num_splits
            else:
                num_split = np.asarray(self.num_splits, dtype=np.int32)
                split_vec_op = tf.compat.v1.constant(
                    num_split,
                    shape=num_split.shape,
                    dtype=tf.int32,
                    name="const_split_vec",
                )
                split_op = tf.split(
                    a, num_or_size_splits=split_vec_op, axis=self.axis, name="split"
                )
                result_count = num_split.shape[0]

            sums = []
            for i in range(result_count):
                sums.append(tf.math.reduce_sum(split_op[i], name="reduce_{}".format(i)))
            return tf.stack(sums, 0, name=self.result_name)
939
    class Tile:
        def __init__(self, multiples, name):
            self.multiples = multiples
            self.result_name = name

        def eval(self, a):
            t = tf.tile(a, self.multiples, name="tile")
            # identity gives the result tensor the expected name
            return tf.identity(t, name=self.result_name)

    class Reverse:
        def __init__(self, axis, name):
            # single axis; wrapped in a list for tf.reverse
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            return tf.reverse(a, [self.axis], name=self.result_name)

    class Gather:
        def __init__(self, indices, batch_dims, axis, name):
            self.indices = indices
            self.batch_dims = batch_dims
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            return tf.gather(
                a,
                self.indices,
                batch_dims=self.batch_dims,
                axis=self.axis,
                name=self.result_name,
            )

    class GatherNd:
        def __init__(self, indices, name):
            self.indices = indices
            self.result_name = name

        def eval(self, a):
            return tf.gather_nd(a, self.indices, name=self.result_name)

    class ScatterNd:
        def __init__(self, shape, indices_shape, N, rng, name):
            self.shape = shape
            self.indices_shape = indices_shape
            self.N = N
            # rng copied from the arggen stage (see eval comments)
            self.rng = rng
            self.result_name = name

        def eval(self, a):

            # This operator is special. The indices and updates tensors really need
            # to be created together, but in the current structure of this tool there
            # is no way to do that before now. The number of updates is determined by
            # the indices, so we can really only create that after indices; but we
            # don't know the type at that time.
            #
            # Shapes are guaranteed deterministic, but we'll use our rng
            # copied from the arggen stage. It's possible that index and
            # update *values* will be non-deterministic.
            #
            # We take the tensor_tensor simply to get the dtype.

            shape_const = tf.constant(self.shape, tf.int32)

            updates_shape = list(self.indices_shape[:-1])
            updates_shape.extend(self.shape[self.indices_shape[-1] :])

            updates_const = tf.constant(TGen.getRand(updates_shape, a.dtype, self.rng))

            indices = np.zeros(self.indices_shape, dtype=np.int32)

            # We need to generate the random indices tensor based on the
            # limits of 'shape' for each dimension. Surely, there is a faster
            # vectorized way to do this, but the tensors are fairly small so we
            # will do this one element at a time. Each element needs to be sized based
            # on the size of the last dimension.
            for idx in np.ndindex(indices.shape):
                indices[idx] = self.rng.integers(0, self.shape[idx[-1]], size=1)[0]
                # print('{} {}'.format(idx, indices[idx]))

            indices_const = tf.constant(indices, dtype=tf.int32)

            return tf.scatter_nd(
                indices=indices_const,
                updates=updates_const,
                shape=shape_const,
                name=self.result_name,
            )
1029
    class SpaceToBatch:
        def __init__(self, block_shape, padding, name):
            self.block_shape = block_shape
            self.padding = padding
            self.result_name = name

        def eval(self, a):
            return tf.space_to_batch(
                a, self.block_shape, self.padding, name=self.result_name
            )

    class BatchToSpace:
        def __init__(self, block_shape, cropping, name):
            self.block_shape = block_shape
            self.cropping = cropping
            self.result_name = name

        def eval(self, a):
            # transpose to swap depth and batch first. this could avoid adding new shape
            block_rank = len(self.block_shape)
            perm = [len(a.shape) - 1]
            for i in range(block_rank):
                perm.append(i + 1)
            perm.append(0)
            transpose_op = tf.transpose(a, perm)
            return tf.batch_to_space(
                transpose_op, self.block_shape, self.cropping, name=self.result_name
            )

    class SpaceToDepth:
        def __init__(self, block_shape, name):
            self.block_shape = block_shape
            self.result_name = name

        def eval(self, a):
            return tf.nn.space_to_depth(a, self.block_shape, name=self.result_name)

    class DepthToSpace:
        def __init__(self, block_shape, name):
            self.block_shape = block_shape
            self.result_name = name

        def eval(self, a):
            return tf.nn.depth_to_space(a, self.block_shape, name=self.result_name)

    class OneHot:
        def __init__(self, depth, axis, name):
            self.depth = depth
            self.axis = axis
            self.result_name = name

        def eval(self, indices, on_value, off_value):
            # output dtype follows on_value's dtype
            return tf.one_hot(
                indices,
                self.depth,
                on_value,
                off_value,
                self.axis,
                on_value.dtype,
                self.result_name,
            )

    class Fakequant:
        def __init__(self, num_bits, narrow_range, name):
            self.num_bits = num_bits
            self.narrow_range = narrow_range
            self.result_name = name

        def eval(self, a):
            # fixed [-2.0, 2.0] quantization range
            return tf.quantization.fake_quant_with_min_max_args(
                a,
                min=-2.0,
                max=2.0,
                num_bits=self.num_bits,
                narrow_range=self.narrow_range,
                name=self.result_name,
            )
1107
TatWai Chong0cef07e2023-02-27 13:22:52 -08001108 class Resize:
1109 def __init__(self, mode, align, half, scale, name):
Jeremy Johnson015c3552022-02-23 12:15:03 +00001110 self.result_name = name
TatWai Chong0cef07e2023-02-27 13:22:52 -08001111 self.mode = mode
1112 self.align = align
1113 self.half = half
1114 self.scale = scale
Jeremy Johnson015c3552022-02-23 12:15:03 +00001115
1116 def eval(self, a):
1117 out_shape = []
TatWai Chong0cef07e2023-02-27 13:22:52 -08001118 out_shape.append(a.shape[1] * self.scale)
1119 out_shape.append(a.shape[2] * self.scale)
Jeremy Johnson015c3552022-02-23 12:15:03 +00001120
TatWai Chong0cef07e2023-02-27 13:22:52 -08001121 tf_resize_dict = (
1122 {"tf_resize_func": tf.compat.v1.image.resize_nearest_neighbor}
1123 if (self.mode == "nearest")
1124 else {"tf_resize_func": tf.compat.v1.image.resize_bilinear}
1125 )
1126 resize = tf_resize_dict["tf_resize_func"](
Jeremy Johnson015c3552022-02-23 12:15:03 +00001127 a,
1128 out_shape,
TatWai Chong0cef07e2023-02-27 13:22:52 -08001129 align_corners=self.align,
Jeremy Johnson015c3552022-02-23 12:15:03 +00001130 name="resize",
TatWai Chong0cef07e2023-02-27 13:22:52 -08001131 half_pixel_centers=self.half,
TatWai Chongf7326092022-06-08 12:17:14 -07001132 )
1133 return tf.identity(resize, name=self.result_name)
1134
    class LeftShift:
        def __init__(self, shift, name):
            self.shift = shift
            self.result_name = name

        def eval(self, a):
            return tf.bitwise.left_shift(a, self.shift, name=self.result_name)

    class RightShift:
        def __init__(self, shift, name):
            self.shift = shift
            self.result_name = name

        def eval(self, a):
            return tf.bitwise.right_shift(a, self.shift, name=self.result_name)

    class While:
        def __init__(self, name):
            self.result_name = name

        # loop condition: keep iterating until the sum of x reaches the cap
        def while_cond(self, x):
            return tf.reduce_sum(x) < self.cap

        def while_body(self, x):
            return tf.add(x, tf.math.sigmoid(x))

        def eval(self, a):
            # cap is created here (not __init__) so it matches a's dtype
            self.cap = tf.cast(
                tf.constant(
                    2.0,
                    shape=[
                        1,
                    ],
                ),
                a.dtype,
            )

            result = tf.while_loop(
                self.while_cond, self.while_body, [a], name=self.result_name
            )

            return result[0]
1177
    # Keras-layer builders.  These return the layer output directly;
    # result_name is stored but not applied to the output tensor.
    class LSTM:
        def __init__(self, name):
            self.result_name = name
            # fixed initializers keep the generated network deterministic
            self.lstm = tf.keras.layers.LSTM(
                2,
                activation="tanh",
                unroll=False,
                recurrent_activation="sigmoid",
                use_bias=True,
                recurrent_initializer="ones",
                kernel_initializer="ones",
            )

        def eval(self, a):
            return self.lstm(a)

    class GRU:
        def __init__(self, name):
            self.result_name = name
            # NOTE(review): attribute is named `lstm` but holds a GRU layer
            self.lstm = tf.keras.layers.GRU(
                2,
                recurrent_activation="sigmoid",
                use_bias=True,
                recurrent_initializer="ones",
                kernel_initializer="ones",
            )

        def eval(self, a):
            return self.lstm(a)

    class RNN:
        def __init__(self, name):
            self.result_name = name
            basic_cell = tf.keras.layers.SimpleRNNCell(
                units=2,
                activation="sigmoid",
                use_bias=True,
                recurrent_initializer="ones",
            )
            self.rnn = tf.keras.layers.RNN(basic_cell, unroll=False)

        def eval(self, a):
            return self.rnn(a)

    class FullyConnected:
        def __init__(self, name):
            self.result_name = name
            self.dense = tf.keras.layers.Dense(2)

        def eval(self, a):
            return self.dense(a)

    class RFFT2d:
        def __init__(self, fft_length, name):
            self.fft_length = fft_length
            self.result_name = name

        def eval(self, a):
            # output is complex-valued (see Real/Imag below)
            return tf.signal.rfft2d(a, self.fft_length, name=self.result_name)

    class Real:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.real(a, name=self.result_name)

    class Imag:
        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.imag(a, name=self.result_name)

    class BroadcastTo:
        def __init__(self, shape, name):
            self.shape = shape
            self.result_name = name

        def eval(self, a):
            return tf.broadcast_to(a, shape=self.shape, name=self.result_name)
1258 return tf.broadcast_to(a, shape=self.shape, name=self.result_name)