blob: b2822872e0ed8b0855f860e54e9f116da01a67be [file] [log] [blame]
Jeremy Johnson015c3552022-02-23 12:15:03 +00001# Copyright (c) 2020-2022, ARM Limited.
2# SPDX-License-Identifier: Apache-2.0
3import numpy as np
4import tensorflow as tf
5from frameworks.tensor_gen import TGen
6
7
8class TBuilder:
9 """The member functions build the tensorflow operators into small networks
10 for our tests"""
11
    def __init__(self):
        # Stateless: every operator is modelled by one of the nested classes.
        pass
14
15 def fake_quant(tensor, tensor_scale, name):
16 """Helper function for quantizing with a scaling parameters structure."""
17 return tf.quantization.fake_quant_with_min_max_args(
18 tensor,
19 min=tensor_scale.min,
20 max=tensor_scale.max,
21 num_bits=tensor_scale.num_bits,
22 narrow_range=tensor_scale.narrow_range,
23 name=name,
24 )
25
26 def fake_quant_params(tensor, min, max, scaling, name):
27 """Helper function for quantizing with individual scaling parameters."""
28 return tf.quantization.fake_quant_with_min_max_args(
29 tensor,
30 min=min,
31 max=max,
32 num_bits=scaling.num_bits,
33 narrow_range=scaling.narrow_range,
34 name=name,
35 )
36
    class Add:
        """Elementwise a + b (tf.add)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.add(a, b, name=self.result_name)

    class Sub:
        """Elementwise a - b (tf.subtract)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.subtract(a, b, name=self.result_name)

    class Mul:
        """Elementwise a * b (tf.multiply)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.multiply(a, b, name=self.result_name)

    class Exp:
        """Elementwise exponential (tf.exp)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.exp(a, name=self.result_name)

    class Rcp:
        """Elementwise reciprocal 1/a (tf.math.reciprocal)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.reciprocal(a, name=self.result_name)

    class Relu:
        """Rectified linear unit max(a, 0) (tf.nn.relu)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.relu(a, name=self.result_name)
78
Jerry Ge93912432022-07-22 10:29:13 -070079 class Relu1:
80 def __init__(self, name):
81 self.result_name = name
82
83 def eval(self, a):
84 # TF doesn't have relu_n1_to_1 operator,
85 # use min and max as a workaround
86 # alternatively, we can use clip_by_value
87 return tf.math.minimum(1.0, tf.math.maximum(-1.0, a))
88
    class Relu6:
        """Clamp a to [0, 6] (tf.nn.relu6)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.relu6(a, name=self.result_name)

    class LeakyRelu:
        """Leaky ReLU with configurable negative-slope alpha."""

        def __init__(self, alpha, name):
            self.alpha = alpha
            self.result_name = name

        def eval(self, a):
            return tf.nn.leaky_relu(a, alpha=self.alpha, name=self.result_name)
103
TatWai Chong41a04fe2022-11-03 21:44:32 +0000104 class Prelu:
105 def __init__(self, name):
106 self.result_name = name
107 self.prelu = tf.keras.layers.PReLU(
108 alpha_initializer=tf.keras.initializers.RandomNormal(
109 mean=0.0, stddev=1.0
110 )
111 )
112
113 def eval(self, a):
114 return self.prelu(a)
115
    class Gelu:
        """Gaussian error linear unit (tf.nn.gelu)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.gelu(a, name=self.result_name)

    class Concat:
        """Concatenate two tensors along a fixed axis."""

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a, b):
            return tf.concat([a, b], self.axis, name=self.result_name)
130
    class BitwiseAnd:
        """Elementwise bitwise AND of integer tensors."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.bitwise.bitwise_and(a, b, name=self.result_name)

    class BitwiseOr:
        """Elementwise bitwise OR of integer tensors."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.bitwise.bitwise_or(a, b, name=self.result_name)

    class BitwiseNot:
        """Elementwise bitwise inversion (tf.bitwise.invert)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.bitwise.invert(a, name=self.result_name)

    class BitwiseXor:
        """Elementwise bitwise XOR of integer tensors."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.bitwise.bitwise_xor(a, b, name=self.result_name)

    class LogicalAnd:
        """Elementwise logical AND of boolean tensors."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.logical_and(a, b, name=self.result_name)

    class LogicalOr:
        """Elementwise logical OR of boolean tensors."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.logical_or(a, b, name=self.result_name)

    class LogicalNot:
        """Elementwise logical NOT of a boolean tensor."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.logical_not(a, name=self.result_name)
179
    class ReduceAny:
        """Logical-OR reduction over axis_list (tf.math.reduce_any)."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_any(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceAll:
        """Logical-AND reduction over axis_list (tf.math.reduce_all)."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_all(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceMin:
        """Minimum reduction over axis_list (tf.math.reduce_min)."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_min(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceMax:
        """Maximum reduction over axis_list (tf.math.reduce_max)."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_max(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceSum:
        """Sum reduction over axis_list (tf.math.reduce_sum)."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_sum(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceMean:
        """Mean reduction over axis_list (tf.math.reduce_mean)."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_mean(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )

    class ReduceProduct:
        """Product reduction over axis_list (tf.math.reduce_prod)."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_prod(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )
256
    class Min:
        """Elementwise minimum of a and b."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.minimum(a, b, name=self.result_name)

    class Max:
        """Elementwise maximum of a and b."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.maximum(a, b, name=self.result_name)

    class Pow:
        """Elementwise a ** b (tf.math.pow)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.pow(a, b, name=self.result_name)

    class Abs:
        """Elementwise absolute value."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.abs(a, name=self.result_name)

    class Ceil:
        """Elementwise ceiling."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.ceil(a, name=self.result_name)

    class Floor:
        """Elementwise floor."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.floor(a, name=self.result_name)

    class Log:
        """Elementwise natural logarithm."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.log(a, name=self.result_name)

    class Negate:
        """Elementwise negation -a."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.negative(a, name=self.result_name)

    class Rsqrt:
        """Elementwise reciprocal square root 1/sqrt(a)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.rsqrt(a, name=self.result_name)

    class Sigmoid:
        """Elementwise sigmoid."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.sigmoid(a, name=self.result_name)

    class Tanh:
        """Elementwise hyperbolic tangent."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.tanh(a, name=self.result_name)

    class Square:
        """Elementwise square a * a."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.square(a, name=self.result_name)

    class SquaredDifference:
        """Elementwise (a - b) ** 2."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.squared_difference(a, b, name=self.result_name)

    class Equal:
        """Elementwise a == b comparison (boolean result)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.equal(a, b, name=self.result_name)

    class GreaterEqual:
        """Elementwise a >= b comparison (boolean result)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.greater_equal(a, b, name=self.result_name)

    class Greater:
        """Elementwise a > b comparison (boolean result)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.greater(a, b, name=self.result_name)

    class Less:
        """Elementwise a < b comparison (boolean result)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.less(a, b, name=self.result_name)

    class LessEqual:
        """Elementwise a <= b comparison (boolean result)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.less_equal(a, b, name=self.result_name)
382
    class Conv2d:
        """2-D convolution (NHWC) with a fixed weight tensor.

        weight/strides/padding/dilations are captured at construction time;
        eval() only supplies the input activation.
        """

        def __init__(self, weight, strides, padding, dilations, name):
            self.weight = weight
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            return tf.nn.conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name=self.result_name,
            )
401
    class Conv2dRelu:
        """Conv2D (stride 1, SAME padding, NHWC) followed by ReLU."""

        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.nn.relu(conv2d, name=self.result_name)

    class Conv2dRelu6:
        """Conv2D (stride 1, SAME padding, NHWC) followed by ReLU6."""

        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.nn.relu6(conv2d, name=self.result_name)

    class Conv2dReluN1To1:
        """Conv2D (stride 1, SAME padding, NHWC) followed by a [-1, 1] clamp.

        Uses clip_by_value since TF has no relu_n1_to_1 operator.
        """

        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.clip_by_value(conv2d, -1.0, 1.0, name=self.result_name)

    class Conv2dTanh:
        """Conv2D (stride 1, SAME padding, NHWC) followed by tanh."""

        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.math.tanh(conv2d, name=self.result_name)
469
    class Conv2dWithBias:
        """Conv2D (NHWC) followed by a channel-wise bias add."""

        def __init__(self, weight, bias, strides, padding, dilations, name):
            self.weight = weight
            self.bias = bias
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            conv2d_op = tf.nn.conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name="conv2d",
            )
            # The bias_add carries result_name so the network output tensor
            # gets the expected name.
            bias_add_op = tf.nn.bias_add(
                conv2d_op, self.bias, data_format="NHWC", name=self.result_name
            )
            return bias_add_op
493
    class Conv3d:
        """3-D convolution (NDHWC) with a fixed weight tensor."""

        def __init__(self, weight, strides, padding, dilations, name):
            self.weight = weight
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            return tf.nn.conv3d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NDHWC",
                dilations=self.dilations,
                name=self.result_name,
            )

    class Conv3dWithBias:
        """3-D convolution (NDHWC) followed by a channel-wise bias add."""

        def __init__(self, weight, bias, strides, padding, dilations, name):
            self.weight = weight
            self.bias = bias
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            conv3d_op = tf.nn.conv3d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NDHWC",
                dilations=self.dilations,
                name="conv3d",
            )
            # NOTE(review): unlike Conv2dWithBias, no data_format is passed to
            # bias_add here — it relies on the channels-last default.
            bias_add_op = tf.nn.bias_add(conv3d_op, self.bias, name=self.result_name)
            return bias_add_op
534
    class DepthwiseConv2d:
        """Depthwise 2-D convolution (NHWC) with a fixed weight tensor."""

        def __init__(self, weight, strides, padding, dilations, name):
            self.weight = weight
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            dws_conv2d = tf.nn.depthwise_conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name="dws_conv2d",
            )
            # Identity forces the output tensor to carry result_name.
            return tf.identity(dws_conv2d, name=self.result_name)
554
    class DepthwiseConv2dWithBias:
        """Depthwise Conv2D (NHWC) followed by a channel-wise bias add."""

        def __init__(self, weight, bias, strides, padding, dilations, name):
            self.weight = weight
            self.bias = bias
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            dws_conv2d = tf.nn.depthwise_conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name="dws_conv2d",
            )
            bias_add_op = tf.nn.bias_add(
                dws_conv2d, self.bias, data_format="NHWC", name=self.result_name
            )
            return bias_add_op
578
    class TransposeConv2d:
        """Transposed (fractionally strided) 2-D convolution, NHWC.

        output_shape is fixed at construction time, as conv2d_transpose
        requires an explicit output shape.
        """

        def __init__(self, weight, output_shape, strides, padding, name):
            self.weight = weight
            self.output_shape = output_shape
            self.strides = strides
            self.padding = padding
            self.result_name = name

        def eval(self, input):
            return tf.nn.conv2d_transpose(
                input,
                self.weight,
                self.output_shape,
                self.strides,
                self.padding,
                data_format="NHWC",
                name=self.result_name,
            )
597
    class Argmax:
        """Index of the maximum along an axis; int32 output dtype."""

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            return tf.argmax(a, self.axis, output_type=tf.int32, name=self.result_name)
605
    class AvgPool2d:
        """2-D average pooling (NHWC)."""

        def __init__(self, strides, kernel_size, padding, name):
            self.strides = strides
            self.kernel_size = kernel_size
            self.padding = padding
            self.result_name = name

        def eval(self, input):
            return tf.nn.avg_pool2d(
                input,
                strides=self.strides,
                ksize=self.kernel_size,
                padding=self.padding,
                data_format="NHWC",
                name=self.result_name,
            )

    class MaxPool2d:
        """2-D max pooling (NHWC)."""

        def __init__(self, strides, kernel_size, padding, name):
            self.strides = strides
            self.kernel_size = kernel_size
            self.padding = padding
            self.result_name = name

        def eval(self, input):
            return tf.nn.max_pool2d(
                input,
                strides=self.strides,
                ksize=self.kernel_size,
                padding=self.padding,
                data_format="NHWC",
                name=self.result_name,
            )
639
640 class Reshape:
641 def __init__(self, shape, name):
642 self.shape = shape
643 self.result_name = name
644
645 def eval(self, a):
646 reshape_op = tf.reshape(a, self.shape)
647 return tf.identity(reshape_op, name=self.result_name)
648
    class Transpose:
        """Permute tensor dimensions according to perm."""

        def __init__(self, perm, name):
            self.perm = perm
            self.result_name = name

        def eval(self, a):
            return tf.transpose(a, self.perm, name=self.result_name)

    class Slice:
        """Extract a fixed-size slice starting at begin."""

        def __init__(self, begin, size, name):
            self.begin = begin
            self.size = size
            self.result_name = name

        def eval(self, a):
            return tf.slice(a, begin=self.begin, size=self.size, name=self.result_name)
665
    class StridedSlice:
        """Strided slice with the full set of TF mask parameters.

        All parameters mirror tf.strided_slice's keyword arguments and are
        fixed at construction time.
        """

        def __init__(
            self,
            begin,
            end,
            strides,
            begin_mask,
            end_mask,
            ellipsis_mask,
            new_axis_mask,
            shrink_axis_mask,
            name,
        ):
            self.begin = begin
            self.end = end
            self.strides = strides
            self.begin_mask = begin_mask
            self.end_mask = end_mask
            self.ellipsis_mask = ellipsis_mask
            self.new_axis_mask = new_axis_mask
            self.shrink_axis_mask = shrink_axis_mask
            self.result_name = name

        def eval(self, a):
            return tf.strided_slice(
                a,
                begin=self.begin,
                end=self.end,
                strides=self.strides,
                begin_mask=self.begin_mask,
                end_mask=self.end_mask,
                ellipsis_mask=self.ellipsis_mask,
                new_axis_mask=self.new_axis_mask,
                shrink_axis_mask=self.shrink_axis_mask,
                name=self.result_name,
            )
702
    class Select:
        """Elementwise select: a where selector is true, else b (tf.where)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, selector, a, b):
            return tf.where(condition=selector, x=a, y=b, name=self.result_name)

    class Addn:
        """Sum of four tensors via tf.add_n."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b, c, d):
            return tf.add_n([a, b, c, d], name=self.result_name)
716
    class Concatv2:
        """Concatenate four tensors along a fixed axis."""

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a, b, c, d):
            return tf.concat([a, b, c, d], axis=self.axis, name=self.result_name)

    class Stack:
        """Stack four tensors along a new axis."""

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a, b, c, d):
            return tf.stack([a, b, c, d], axis=self.axis, name=self.result_name)
732
733 class Unstack:
734 def __init__(self, axis, name):
735 self.axis = axis
736 self.result_name = name
737
738 def eval(self, a):
739 unstack_op = tf.unstack(a, axis=self.axis, name="unstack_op")
740 result_count = a.shape[self.axis]
741
742 if result_count == 1:
743 return tf.identity(unstack_op[0], name=self.result_name)
744
745 sums = []
746 for i in range(result_count):
747 sums.append(
748 tf.math.reduce_sum(unstack_op[i], name="reduce_{}".format(i))
749 )
750 return tf.stack(sums, 0, name=self.result_name)
751
    class MirrorPad:
        """Pad with a mirroring mode (presumably REFLECT/SYMMETRIC — the mode
        string is supplied by the caller; TODO confirm against callers)."""

        def __init__(self, padding, mode, name):
            self.padding = padding
            self.mode = mode
            self.result_name = name

        def eval(self, a):
            return tf.pad(
                a,
                self.padding,
                mode=self.mode,
                # constant_values is only meaningful for CONSTANT mode; 0 is
                # the default and is harmless for mirror modes.
                constant_values=0,
                name=self.result_name,
            )
766
    class Pad:
        """Constant (zero) padding with fixed per-axis padding amounts."""

        def __init__(self, padding, name):
            self.padding = padding
            self.result_name = name

        def eval(self, a):
            return tf.pad(
                a,
                self.padding,
                mode="CONSTANT",
                constant_values=0,
                name=self.result_name,
            )
780
    class ExpandDims:
        """Insert a size-1 dimension at the given axis."""

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            return tf.expand_dims(a, self.axis, name=self.result_name)

    class Shape:
        """Return the runtime shape of a as a 1-D tensor."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.shape(a, name=self.result_name)

    class Rank:
        """Return the rank (number of dimensions) of a as a scalar tensor."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.rank(a, name=self.result_name)
802
    class Fill:
        """Produce a constant tensor of a fixed shape filled with value.

        The input 'a' is deliberately unused — presumably kept so the network
        has a placeholder input like every other wrapper; confirm against the
        test harness before changing the signature.
        """

        def __init__(self, shape, value, name):
            self.shape = shape
            self.value = value
            self.result_name = name

        def eval(self, a):
            return tf.fill(self.shape, self.value, name=self.result_name)
811
    class Elu:
        """Exponential linear unit (tf.nn.elu)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.elu(a, name=self.result_name)

    class Softmax:
        """Softmax over the last dimension (tf.nn.softmax default)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.softmax(a, name=self.result_name)

    class LogSoftmax:
        """Log-softmax over the last dimension."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.log_softmax(a, name=self.result_name)
832
    class MatMul:
        """Matrix multiplication a @ b (tf.linalg.matmul)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.linalg.matmul(a, b, name=self.result_name)

    class AddScalar:
        """Add the scalar constant 1 to a (exercises scalar broadcasting)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.add(a, 1, name=self.result_name)
846
847 class Add1d:
848 def __init__(self, name):
849 self.result_name = name
850
851 def eval(self, a, b):
852 if len(b.shape) > 1:
853 b_1d = tf.reduce_sum(b, axis=list(range(0, len(b.shape) - 1, 1)))
854 else:
855 b_1d = b
856 return tf.add(a, b_1d, name=self.result_name)
857
    class Split:
        """Split a along an axis, then fold the pieces into one tensor.

        num_splits may be an int (equal split) or a list of sizes.
        """

        def __init__(self, num_splits, axis, name):
            self.num_splits = num_splits
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            # The split op generates a list of outputs. Since we have difficulty
            # serializing a list or array of Numpy arrays, we will reduce each of
            # the results

            if not isinstance(self.num_splits, list):
                # Integer: split into num_splits equal pieces.
                split_op = tf.split(
                    a, num_or_size_splits=self.num_splits, axis=self.axis, name="split"
                )
                result_count = self.num_splits
            else:
                # List: split into explicitly sized pieces, passed to TF as an
                # int32 constant tensor.
                num_split = np.asarray(self.num_splits, dtype=np.int32)
                split_vec_op = tf.compat.v1.constant(
                    num_split,
                    shape=num_split.shape,
                    dtype=tf.int32,
                    name="const_split_vec",
                )
                split_op = tf.split(
                    a, num_or_size_splits=split_vec_op, axis=self.axis, name="split"
                )
                result_count = num_split.shape[0]

            # Reduce each piece to a scalar and stack into a single output.
            sums = []
            for i in range(result_count):
                sums.append(tf.math.reduce_sum(split_op[i], name="reduce_{}".format(i)))
            return tf.stack(sums, 0, name=self.result_name)
891
892 class Tile:
893 def __init__(self, multiples, name):
894 self.multiples = multiples
895 self.result_name = name
896
897 def eval(self, a):
898 t = tf.tile(a, self.multiples, name="tile")
899 return tf.identity(t, name=self.result_name)
900
    class Reverse:
        """Reverse a along a single axis."""

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            # tf.reverse takes a list of axes; we reverse exactly one.
            return tf.reverse(a, [self.axis], name=self.result_name)
908
    class Gather:
        """Gather slices from a using fixed indices, axis and batch_dims."""

        def __init__(self, indices, batch_dims, axis, name):
            self.indices = indices
            self.batch_dims = batch_dims
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            return tf.gather(
                a,
                self.indices,
                batch_dims=self.batch_dims,
                axis=self.axis,
                name=self.result_name,
            )

    class GatherNd:
        """Gather slices from a using fixed N-dimensional indices."""

        def __init__(self, indices, name):
            self.indices = indices
            self.result_name = name

        def eval(self, a):
            return tf.gather_nd(a, self.indices, name=self.result_name)
932
    class ScatterNd:
        """Scatter randomly generated updates into a zero tensor of 'shape'.

        Indices and updates are generated at eval() time using the supplied
        rng; the input tensor 'a' is used only for its dtype.
        """

        def __init__(self, shape, indices_shape, N, rng, name):
            self.shape = shape
            self.indices_shape = indices_shape
            self.N = N
            self.rng = rng
            self.result_name = name

        def eval(self, a):

            # This operator is special. The indices and updates tensors really need
            # to be created together, but in the current structure of this tool there
            # is no way to do that before now. The number of updates is determined by
            # the indices, so we can really only create that after indices; but we
            # don't know the type at that time.
            #
            # Shapes are guaranteed deterministic, but we'll use our rng
            # copied from the arggen stage. It's possible that index and
            # update *values* will be non-deterministic.
            #
            # We take the tensor_tensor simply to get the dtype.

            shape_const = tf.constant(self.shape, tf.int32)

            # Updates shape: indices batch dims + the sliced tail of 'shape'.
            updates_shape = list(self.indices_shape[:-1])
            updates_shape.extend(self.shape[self.indices_shape[-1] :])

            updates_const = tf.constant(TGen.getRand(updates_shape, a.dtype, self.rng))

            indices = np.zeros(self.indices_shape, dtype=np.int32)

            # We need to generate the random indices tensor based on the
            # limits of 'shape' for each dimension. Surely, there is a faster
            # vectorized way to do this, but the tensors are fairly small so we
            # will do this one element at a time. Each element needs to be sized based
            # on the size of the last dimension.
            for idx in np.ndindex(indices.shape):
                indices[idx] = self.rng.integers(0, self.shape[idx[-1]], size=1)[0]
                # print('{} {}'.format(idx, indices[idx]))

            indices_const = tf.constant(indices, dtype=tf.int32)

            return tf.scatter_nd(
                indices=indices_const,
                updates=updates_const,
                shape=shape_const,
                name=self.result_name,
            )
981
    class SpaceToBatch:
        """tf.space_to_batch with fixed block_shape and padding."""

        def __init__(self, block_shape, padding, name):
            self.block_shape = block_shape
            self.padding = padding
            self.result_name = name

        def eval(self, a):
            return tf.space_to_batch(
                a, self.block_shape, self.padding, name=self.result_name
            )

    class BatchToSpace:
        """tf.batch_to_space with fixed block_shape and cropping.

        The input is transposed first so that the depth and batch dimensions
        are swapped before batch_to_space is applied.
        """

        def __init__(self, block_shape, cropping, name):
            self.block_shape = block_shape
            self.cropping = cropping
            self.result_name = name

        def eval(self, a):
            # transpose to swap depth and batch first. this could avoid adding new shape
            # Built perm is [last_dim, 1..block_rank, 0].
            block_rank = len(self.block_shape)
            perm = [len(a.shape) - 1]
            for i in range(block_rank):
                perm.append(i + 1)
            perm.append(0)
            transpose_op = tf.transpose(a, perm)
            return tf.batch_to_space(
                transpose_op, self.block_shape, self.cropping, name=self.result_name
            )
1010
    class SpaceToDepth:
        """Rearrange spatial blocks into depth (tf.nn.space_to_depth)."""

        def __init__(self, block_shape, name):
            self.block_shape = block_shape
            self.result_name = name

        def eval(self, a):
            return tf.nn.space_to_depth(a, self.block_shape, name=self.result_name)

    class DepthToSpace:
        """Rearrange depth into spatial blocks (tf.nn.depth_to_space)."""

        def __init__(self, block_shape, name):
            self.block_shape = block_shape
            self.result_name = name

        def eval(self, a):
            return tf.nn.depth_to_space(a, self.block_shape, name=self.result_name)
1026
    class OneHot:
        """One-hot encode indices with caller-supplied on/off values.

        The output dtype follows on_value's dtype.
        """

        def __init__(self, depth, axis, name):
            self.depth = depth
            self.axis = axis
            self.result_name = name

        def eval(self, indices, on_value, off_value):
            # Positional args map to tf.one_hot(indices, depth, on_value,
            # off_value, axis, dtype, name).
            return tf.one_hot(
                indices,
                self.depth,
                on_value,
                off_value,
                self.axis,
                on_value.dtype,
                self.result_name,
            )
1043
    class Fakequant:
        """Fake-quantize a over the hard-coded range [-2.0, 2.0]."""

        def __init__(self, num_bits, narrow_range, name):
            self.num_bits = num_bits
            self.narrow_range = narrow_range
            self.result_name = name

        def eval(self, a):
            return tf.quantization.fake_quant_with_min_max_args(
                a,
                min=-2.0,
                max=2.0,
                num_bits=self.num_bits,
                narrow_range=self.narrow_range,
                name=self.result_name,
            )
1059
    class ResizeNearest:
        """Nearest-neighbor upscale of the two spatial dims by a factor of 2."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            out_shape = []
            out_shape.append(a.shape[1] * 2)
            out_shape.append(a.shape[2] * 2)

            # tf.image.resize() overwrites the node name with a method-specific
            # suffix, so add an extra identity to force the output tensor name
            # to result_name.
            resize = tf.image.resize(
                a,
                out_shape,
                method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
                name="resize",
            )
            return tf.identity(resize, name=self.result_name)
1080
    class ResizeBilinear:
        """Bilinear upscale of the two spatial dims by a factor of 2."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            out_shape = []
            out_shape.append(a.shape[1] * 2)
            out_shape.append(a.shape[2] * 2)

            # tf.image.resize() will overwrite the node name with result_name +
            # '/BILINEAR'; add an extra identity to force the output tensor
            # name to result_name.
            resize = tf.image.resize(
                a, out_shape, method=tf.image.ResizeMethod.BILINEAR, name="resize"
            )
            return tf.identity(resize, name=self.result_name)
1098
TatWai Chongf7326092022-06-08 12:17:14 -07001099 # New tf resize set (align_corners, half_pixel_centers) = (false, true) by default.
1100 # Test the rest option combinations here.
1101 # Note that (align_corners, half_pixel_centers) = (true, true) is NOT valid.
    class ResizeBilinearV1AlignCorners:
        """V1 bilinear 2x upscale with (align_corners, half_pixel_centers) =
        (True, False) — a combination the V2 API does not expose."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            out_shape = []
            out_shape.append(a.shape[1] * 2)
            out_shape.append(a.shape[2] * 2)

            resize = tf.compat.v1.image.resize_bilinear(
                a,
                out_shape,
                align_corners=True,
                name="resize",
                half_pixel_centers=False,
            )
            # Identity forces the output tensor to carry result_name.
            return tf.identity(resize, name=self.result_name)

    class ResizeBilinearV1None:
        """V1 bilinear 2x upscale with (align_corners, half_pixel_centers) =
        (False, False)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            out_shape = []
            out_shape.append(a.shape[1] * 2)
            out_shape.append(a.shape[2] * 2)

            resize = tf.compat.v1.image.resize_bilinear(
                a,
                out_shape,
                align_corners=False,
                name="resize",
                half_pixel_centers=False,
            )
            # Identity forces the output tensor to carry result_name.
            return tf.identity(resize, name=self.result_name)
1137
    class LeftShift:
        """Elementwise left shift of a by a fixed shift amount."""

        def __init__(self, shift, name):
            self.shift = shift
            self.result_name = name

        def eval(self, a):
            return tf.bitwise.left_shift(a, self.shift, name=self.result_name)

    class RightShift:
        """Elementwise right shift of a by a fixed shift amount."""

        def __init__(self, shift, name):
            self.shift = shift
            self.result_name = name

        def eval(self, a):
            return tf.bitwise.right_shift(a, self.shift, name=self.result_name)