blob: 8677559a8dbce5a2f7a5f18f84f696916c2a4d1b [file] [log] [blame]
Jeremy Johnson015c3552022-02-23 12:15:03 +00001# Copyright (c) 2020-2022, ARM Limited.
2# SPDX-License-Identifier: Apache-2.0
3import numpy as np
4import tensorflow as tf
5from frameworks.tensor_gen import TGen
6
7
class TBuilder:
    """The member functions build the tensorflow operators into small networks
    for our tests"""

    def __init__(self):
        # Stateless container; each nested class below wraps a single
        # TF operator (or tiny network) with an eval() entry point.
        pass
14
15 def fake_quant(tensor, tensor_scale, name):
16 """Helper function for quantizing with a scaling parameters structure."""
17 return tf.quantization.fake_quant_with_min_max_args(
18 tensor,
19 min=tensor_scale.min,
20 max=tensor_scale.max,
21 num_bits=tensor_scale.num_bits,
22 narrow_range=tensor_scale.narrow_range,
23 name=name,
24 )
25
26 def fake_quant_params(tensor, min, max, scaling, name):
27 """Helper function for quantizing with individual scaling parameters."""
28 return tf.quantization.fake_quant_with_min_max_args(
29 tensor,
30 min=min,
31 max=max,
32 num_bits=scaling.num_bits,
33 narrow_range=scaling.narrow_range,
34 name=name,
35 )
36
    class Add:
        """Network: elementwise tf.add of two tensors."""

        def __init__(self, name):
            # Name given to the result tensor.
            self.result_name = name

        def eval(self, a, b):
            return tf.add(a, b, name=self.result_name)
43
    class Sub:
        """Network: elementwise tf.subtract of two tensors."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.subtract(a, b, name=self.result_name)
50
    class Mul:
        """Network: elementwise tf.multiply of two tensors."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.multiply(a, b, name=self.result_name)
57
    class Exp:
        """Network: elementwise tf.exp."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.exp(a, name=self.result_name)
64
    class Rcp:
        """Network: elementwise reciprocal (1/x)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.reciprocal(a, name=self.result_name)
71
    class Relu:
        """Network: tf.nn.relu activation."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.relu(a, name=self.result_name)
78
Jerry Ge93912432022-07-22 10:29:13 -070079 class Relu1:
80 def __init__(self, name):
81 self.result_name = name
82
83 def eval(self, a):
84 # TF doesn't have relu_n1_to_1 operator,
85 # use min and max as a workaround
86 # alternatively, we can use clip_by_value
87 return tf.math.minimum(1.0, tf.math.maximum(-1.0, a))
88
    class Relu6:
        """Network: tf.nn.relu6 activation (clamp to [0, 6])."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.relu6(a, name=self.result_name)
95
    class LeakyRelu:
        """Network: tf.nn.leaky_relu with a fixed alpha slope."""

        def __init__(self, alpha, name):
            # Slope for negative inputs.
            self.alpha = alpha
            self.result_name = name

        def eval(self, a):
            return tf.nn.leaky_relu(a, alpha=self.alpha, name=self.result_name)
103
    class Concat:
        """Network: concatenate two tensors along a fixed axis."""

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a, b):
            return tf.concat([a, b], self.axis, name=self.result_name)
111
    class BitwiseAnd:
        """Network: elementwise bitwise AND (integer tensors)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.bitwise.bitwise_and(a, b, name=self.result_name)
118
    class BitwiseOr:
        """Network: elementwise bitwise OR (integer tensors)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.bitwise.bitwise_or(a, b, name=self.result_name)
125
    class BitwiseNot:
        """Network: elementwise bitwise NOT (tf.bitwise.invert)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.bitwise.invert(a, name=self.result_name)
132
    class BitwiseXor:
        """Network: elementwise bitwise XOR (integer tensors)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.bitwise.bitwise_xor(a, b, name=self.result_name)
139
    class LogicalAnd:
        """Network: elementwise logical AND (boolean tensors)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.logical_and(a, b, name=self.result_name)
146
    class LogicalOr:
        """Network: elementwise logical OR (boolean tensors)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.logical_or(a, b, name=self.result_name)
153
    class LogicalNot:
        """Network: elementwise logical NOT (boolean tensor)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.logical_not(a, name=self.result_name)
160
    class ReduceAny:
        """Network: tf.math.reduce_any over the given axes."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_any(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )
171
    class ReduceAll:
        """Network: tf.math.reduce_all over the given axes."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_all(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )
182
    class ReduceMin:
        """Network: tf.math.reduce_min over the given axes."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_min(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )
193
    class ReduceMax:
        """Network: tf.math.reduce_max over the given axes."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_max(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )
204
    class ReduceSum:
        """Network: tf.math.reduce_sum over the given axes."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_sum(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )
215
    class ReduceMean:
        """Network: tf.math.reduce_mean over the given axes."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_mean(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )
226
    class ReduceProduct:
        """Network: tf.math.reduce_prod over the given axes."""

        def __init__(self, axis_list, keepdims, name):
            self.axis_list = axis_list
            self.keepdims = keepdims
            self.result_name = name

        def eval(self, a):
            return tf.math.reduce_prod(
                a, self.axis_list, keepdims=self.keepdims, name=self.result_name
            )
237
    class Min:
        """Network: elementwise tf.math.minimum of two tensors."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.minimum(a, b, name=self.result_name)
244
    class Max:
        """Network: elementwise tf.math.maximum of two tensors."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.maximum(a, b, name=self.result_name)
251
    class Pow:
        """Network: elementwise tf.math.pow (a ** b)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.pow(a, b, name=self.result_name)
258
    class Abs:
        """Network: elementwise absolute value."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.abs(a, name=self.result_name)
265
    class Ceil:
        """Network: elementwise ceiling."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.ceil(a, name=self.result_name)
272
    class Floor:
        """Network: elementwise floor."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.floor(a, name=self.result_name)
279
    class Log:
        """Network: elementwise natural logarithm."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.log(a, name=self.result_name)
286
    class Negate:
        """Network: elementwise negation."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.negative(a, name=self.result_name)
293
    class Rsqrt:
        """Network: elementwise reciprocal square root."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.rsqrt(a, name=self.result_name)
300
    class Sigmoid:
        """Network: elementwise sigmoid activation."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.sigmoid(a, name=self.result_name)
307
    class Tanh:
        """Network: elementwise tanh activation."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.tanh(a, name=self.result_name)
314
    class Square:
        """Network: elementwise square."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.math.square(a, name=self.result_name)
321
    class SquaredDifference:
        """Network: elementwise (a - b)**2."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.squared_difference(a, b, name=self.result_name)
328
    class Equal:
        """Network: elementwise equality comparison (boolean result)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.equal(a, b, name=self.result_name)
335
    class GreaterEqual:
        """Network: elementwise a >= b (boolean result)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.greater_equal(a, b, name=self.result_name)
342
    class Greater:
        """Network: elementwise a > b (boolean result)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.greater(a, b, name=self.result_name)
349
    class Less:
        """Network: elementwise a < b (boolean result)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.less(a, b, name=self.result_name)
356
    class LessEqual:
        """Network: elementwise a <= b (boolean result)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.math.less_equal(a, b, name=self.result_name)
363
    class Conv2d:
        """Network: NHWC tf.nn.conv2d with a fixed weight tensor."""

        def __init__(self, weight, strides, padding, dilations, name):
            self.weight = weight
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            return tf.nn.conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name=self.result_name,
            )
382
    class Conv2dRelu:
        """Network: unit-stride SAME conv2d followed by relu.

        The conv node gets a scratch name; the relu carries result_name so
        the output tensor can be found by name.
        """

        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.nn.relu(conv2d, name=self.result_name)
399
    class Conv2dRelu6:
        """Network: unit-stride SAME conv2d followed by relu6."""

        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.nn.relu6(conv2d, name=self.result_name)
416
    class Conv2dReluN1To1:
        """Network: unit-stride SAME conv2d followed by a clamp to [-1, 1]
        (relu_n1_to_1 expressed as clip_by_value)."""

        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.clip_by_value(conv2d, -1.0, 1.0, name=self.result_name)
433
    class Conv2dTanh:
        """Network: unit-stride SAME conv2d followed by tanh."""

        def __init__(self, weight, name):
            self.weight = weight
            self.result_name = name

        def eval(self, input):
            conv2d = tf.nn.conv2d(
                input,
                self.weight,
                [1, 1, 1, 1],
                "SAME",
                data_format="NHWC",
                dilations=[1, 1, 1, 1],
                name="conv2d",
            )
            return tf.math.tanh(conv2d, name=self.result_name)
450
    class Conv2dWithBias:
        """Network: NHWC conv2d followed by bias_add.

        The bias_add (not the conv) carries result_name.
        """

        def __init__(self, weight, bias, strides, padding, dilations, name):
            self.weight = weight
            self.bias = bias
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            conv2d_op = tf.nn.conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name="conv2d",
            )
            bias_add_op = tf.nn.bias_add(
                conv2d_op, self.bias, data_format="NHWC", name=self.result_name
            )
            return bias_add_op
474
    class DepthwiseConv2d:
        """Network: NHWC depthwise conv2d; an identity op carries
        result_name for the output tensor."""

        def __init__(self, weight, strides, padding, dilations, name):
            self.weight = weight
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            dws_conv2d = tf.nn.depthwise_conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name="dws_conv2d",
            )
            return tf.identity(dws_conv2d, name=self.result_name)
494
    class DepthwiseConv2dWithBias:
        """Network: NHWC depthwise conv2d followed by bias_add."""

        def __init__(self, weight, bias, strides, padding, dilations, name):
            self.weight = weight
            self.bias = bias
            self.strides = strides
            self.padding = padding
            self.dilations = dilations
            self.result_name = name

        def eval(self, input):
            dws_conv2d = tf.nn.depthwise_conv2d(
                input,
                self.weight,
                self.strides,
                self.padding,
                data_format="NHWC",
                dilations=self.dilations,
                name="dws_conv2d",
            )
            bias_add_op = tf.nn.bias_add(
                dws_conv2d, self.bias, data_format="NHWC", name=self.result_name
            )
            return bias_add_op
518
    class TransposeConv2d:
        """Network: NHWC tf.nn.conv2d_transpose with a fixed output shape."""

        def __init__(self, weight, output_shape, strides, padding, name):
            self.weight = weight
            self.output_shape = output_shape
            self.strides = strides
            self.padding = padding
            self.result_name = name

        def eval(self, input):
            return tf.nn.conv2d_transpose(
                input,
                self.weight,
                self.output_shape,
                self.strides,
                self.padding,
                data_format="NHWC",
                name=self.result_name,
            )
537
    class Argmax:
        """Network: tf.argmax along a fixed axis, int32 output."""

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            return tf.argmax(a, self.axis, output_type=tf.int32, name=self.result_name)
545
    class AvgPool2d:
        """Network: NHWC average pooling."""

        def __init__(self, strides, kernel_size, padding, name):
            self.strides = strides
            self.kernel_size = kernel_size
            self.padding = padding
            self.result_name = name

        def eval(self, input):
            return tf.nn.avg_pool2d(
                input,
                strides=self.strides,
                ksize=self.kernel_size,
                padding=self.padding,
                data_format="NHWC",
                name=self.result_name,
            )
562
    class MaxPool2d:
        """Network: NHWC max pooling."""

        def __init__(self, strides, kernel_size, padding, name):
            self.strides = strides
            self.kernel_size = kernel_size
            self.padding = padding
            self.result_name = name

        def eval(self, input):
            return tf.nn.max_pool2d(
                input,
                strides=self.strides,
                ksize=self.kernel_size,
                padding=self.padding,
                data_format="NHWC",
                name=self.result_name,
            )
579
    class Reshape:
        """Network: tf.reshape to a fixed shape; an identity op carries
        result_name for the output tensor."""

        def __init__(self, shape, name):
            self.shape = shape
            self.result_name = name

        def eval(self, a):
            reshape_op = tf.reshape(a, self.shape)
            return tf.identity(reshape_op, name=self.result_name)
588
    class Transpose:
        """Network: tf.transpose with a fixed permutation."""

        def __init__(self, perm, name):
            self.perm = perm
            self.result_name = name

        def eval(self, a):
            return tf.transpose(a, self.perm, name=self.result_name)
596
    class Slice:
        """Network: tf.slice with fixed begin/size."""

        def __init__(self, begin, size, name):
            self.begin = begin
            self.size = size
            self.result_name = name

        def eval(self, a):
            return tf.slice(a, begin=self.begin, size=self.size, name=self.result_name)
605
    class StridedSlice:
        """Network: tf.strided_slice; all slice parameters and bitmask flags
        are passed through unchanged."""

        def __init__(
            self,
            begin,
            end,
            strides,
            begin_mask,
            end_mask,
            ellipsis_mask,
            new_axis_mask,
            shrink_axis_mask,
            name,
        ):
            self.begin = begin
            self.end = end
            self.strides = strides
            self.begin_mask = begin_mask
            self.end_mask = end_mask
            self.ellipsis_mask = ellipsis_mask
            self.new_axis_mask = new_axis_mask
            self.shrink_axis_mask = shrink_axis_mask
            self.result_name = name

        def eval(self, a):
            return tf.strided_slice(
                a,
                begin=self.begin,
                end=self.end,
                strides=self.strides,
                begin_mask=self.begin_mask,
                end_mask=self.end_mask,
                ellipsis_mask=self.ellipsis_mask,
                new_axis_mask=self.new_axis_mask,
                shrink_axis_mask=self.shrink_axis_mask,
                name=self.result_name,
            )
642
    class Select:
        """Network: tf.where select — picks from a where selector is true,
        else from b."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, selector, a, b):
            return tf.where(condition=selector, x=a, y=b, name=self.result_name)
649
    class Addn:
        """Network: tf.add_n over four inputs."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b, c, d):
            return tf.add_n([a, b, c, d], name=self.result_name)
656
    class Concatv2:
        """Network: concatenate four inputs along a fixed axis."""

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a, b, c, d):
            return tf.concat([a, b, c, d], axis=self.axis, name=self.result_name)
664
    class Stack:
        """Network: stack four inputs along a fixed axis."""

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a, b, c, d):
            return tf.stack([a, b, c, d], axis=self.axis, name=self.result_name)
672
673 class Unstack:
674 def __init__(self, axis, name):
675 self.axis = axis
676 self.result_name = name
677
678 def eval(self, a):
679 unstack_op = tf.unstack(a, axis=self.axis, name="unstack_op")
680 result_count = a.shape[self.axis]
681
682 if result_count == 1:
683 return tf.identity(unstack_op[0], name=self.result_name)
684
685 sums = []
686 for i in range(result_count):
687 sums.append(
688 tf.math.reduce_sum(unstack_op[i], name="reduce_{}".format(i))
689 )
690 return tf.stack(sums, 0, name=self.result_name)
691
    class Pad:
        """Network: constant-mode tf.pad with zero fill."""

        def __init__(self, padding, name):
            # Per-dimension [before, after] pad amounts.
            self.padding = padding
            self.result_name = name

        def eval(self, a):
            return tf.pad(
                a,
                self.padding,
                mode="CONSTANT",
                constant_values=0,
                name=self.result_name,
            )
705
    class ExpandDims:
        """Network: insert a length-1 dimension at a fixed axis."""

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            return tf.expand_dims(a, self.axis, name=self.result_name)
713
    class Shape:
        """Network: tf.shape of the input."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.shape(a, name=self.result_name)
720
    class Rank:
        """Network: tf.rank of the input."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.rank(a, name=self.result_name)
727
    class Fill:
        """Network: tf.fill with a fixed shape and value.

        NOTE(review): `a` is intentionally unused — presumably kept so eval()
        matches the uniform single-input signature used by the harness.
        """

        def __init__(self, shape, value, name):
            self.shape = shape
            self.value = value
            self.result_name = name

        def eval(self, a):
            return tf.fill(self.shape, self.value, name=self.result_name)
736
    class Elu:
        """Network: tf.nn.elu activation."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.elu(a, name=self.result_name)
743
    class Softmax:
        """Network: tf.nn.softmax (default last axis)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.softmax(a, name=self.result_name)
750
    class LogSoftmax:
        """Network: tf.nn.log_softmax (default last axis)."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.nn.log_softmax(a, name=self.result_name)
757
    class MatMul:
        """Network: tf.linalg.matmul of two tensors."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a, b):
            return tf.linalg.matmul(a, b, name=self.result_name)
764
    class AddScalar:
        """Network: add the scalar constant 1 to the input."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            return tf.add(a, 1, name=self.result_name)
771
772 class Add1d:
773 def __init__(self, name):
774 self.result_name = name
775
776 def eval(self, a, b):
777 if len(b.shape) > 1:
778 b_1d = tf.reduce_sum(b, axis=list(range(0, len(b.shape) - 1, 1)))
779 else:
780 b_1d = b
781 return tf.add(a, b_1d, name=self.result_name)
782
783 class Split:
784 def __init__(self, num_splits, axis, name):
785 self.num_splits = num_splits
786 self.axis = axis
787 self.result_name = name
788
789 def eval(self, a):
790 # The split op generates a list of outputs. Since we have difficulty
791 # serializing a list or array of Numpy arrays, we will reduce each of
792 # the results
793
794 if not isinstance(self.num_splits, list):
795 split_op = tf.split(
796 a, num_or_size_splits=self.num_splits, axis=self.axis, name="split"
797 )
798 result_count = self.num_splits
799 else:
800 num_split = np.asarray(self.num_splits, dtype=np.int32)
801 split_vec_op = tf.compat.v1.constant(
802 num_split,
803 shape=num_split.shape,
804 dtype=tf.int32,
805 name="const_split_vec",
806 )
807 split_op = tf.split(
808 a, num_or_size_splits=split_vec_op, axis=self.axis, name="split"
809 )
810 result_count = num_split.shape[0]
811
812 sums = []
813 for i in range(result_count):
814 sums.append(tf.math.reduce_sum(split_op[i], name="reduce_{}".format(i)))
815 return tf.stack(sums, 0, name=self.result_name)
816
    class Tile:
        """Network: tf.tile with fixed multiples; an identity op carries
        result_name for the output tensor."""

        def __init__(self, multiples, name):
            self.multiples = multiples
            self.result_name = name

        def eval(self, a):
            t = tf.tile(a, self.multiples, name="tile")
            return tf.identity(t, name=self.result_name)
825
    class Reverse:
        """Network: reverse the input along one fixed axis."""

        def __init__(self, axis, name):
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            return tf.reverse(a, [self.axis], name=self.result_name)
833
    class Gather:
        """Network: tf.gather with fixed indices, batch_dims and axis."""

        def __init__(self, indices, batch_dims, axis, name):
            self.indices = indices
            self.batch_dims = batch_dims
            self.axis = axis
            self.result_name = name

        def eval(self, a):
            return tf.gather(
                a,
                self.indices,
                batch_dims=self.batch_dims,
                axis=self.axis,
                name=self.result_name,
            )
849
    class GatherNd:
        """Network: tf.gather_nd with fixed indices."""

        def __init__(self, indices, name):
            self.indices = indices
            self.result_name = name

        def eval(self, a):
            return tf.gather_nd(a, self.indices, name=self.result_name)
857
    class ScatterNd:
        """Network: tf.scatter_nd with randomly generated indices/updates.

        shape: output tensor shape; indices_shape: shape of the index
        tensor; N and rng are carried over from the argument-generation
        stage.  `a` supplies only the dtype for the updates tensor.
        """

        def __init__(self, shape, indices_shape, N, rng, name):
            self.shape = shape
            self.indices_shape = indices_shape
            self.N = N
            self.rng = rng
            self.result_name = name

        def eval(self, a):

            # This operator is special. The indices and updates tensors really need
            # to be created together, but in the current structure of this tool there
            # is no way to do that before now. The number of updates is determined by
            # the indices, so we can really only create that after indices; but we
            # don't know the type at that time.
            #
            # Shapes are guaranteed deterministic, but we'll use our rng
            # copied from the arggen stage. It's possible that index and
            # update *values* will be non-deterministic.
            #
            # We take the tensor_tensor simply to get the dtype.

            shape_const = tf.constant(self.shape, tf.int32)

            # Updates shape = indices_shape[:-1] + the trailing dims of
            # `shape` that the innermost index does not address.
            updates_shape = list(self.indices_shape[:-1])
            updates_shape.extend(self.shape[self.indices_shape[-1] :])

            updates_const = tf.constant(TGen.getRand(updates_shape, a.dtype, self.rng))

            indices = np.zeros(self.indices_shape, dtype=np.int32)

            # We need to generate the random indices tensor based on the
            # limits of 'shape' for each dimension. Surely, there is a faster
            # vectorized way to do this, but the tensors are fairly small so we
            # will do this one element at a time. Each element needs to be sized based
            # on the size of the last dimension.
            for idx in np.ndindex(indices.shape):
                indices[idx] = self.rng.integers(0, self.shape[idx[-1]], size=1)[0]
                # print('{} {}'.format(idx, indices[idx]))

            indices_const = tf.constant(indices, dtype=tf.int32)

            return tf.scatter_nd(
                indices=indices_const,
                updates=updates_const,
                shape=shape_const,
                name=self.result_name,
            )
906
    class SpaceToBatch:
        """Network: tf.space_to_batch with fixed block shape and padding."""

        def __init__(self, block_shape, padding, name):
            self.block_shape = block_shape
            self.padding = padding
            self.result_name = name

        def eval(self, a):
            return tf.space_to_batch(
                a, self.block_shape, self.padding, name=self.result_name
            )
917
918 class BatchToSpace:
919 def __init__(self, block_shape, cropping, name):
920 self.block_shape = block_shape
921 self.cropping = cropping
922 self.result_name = name
923
924 def eval(self, a):
925 # transpose to swap depth and batch first. this could avoid adding new shape
926 block_rank = len(self.block_shape)
927 perm = [len(a.shape) - 1]
928 for i in range(block_rank):
929 perm.append(i + 1)
930 perm.append(0)
931 transpose_op = tf.transpose(a, perm)
932 return tf.batch_to_space(
933 transpose_op, self.block_shape, self.cropping, name=self.result_name
934 )
935
    class SpaceToDepth:
        """Network: tf.nn.space_to_depth with a fixed block size."""

        def __init__(self, block_shape, name):
            self.block_shape = block_shape
            self.result_name = name

        def eval(self, a):
            return tf.nn.space_to_depth(a, self.block_shape, name=self.result_name)
943
    class DepthToSpace:
        """Network: tf.nn.depth_to_space with a fixed block size."""

        def __init__(self, block_shape, name):
            self.block_shape = block_shape
            self.result_name = name

        def eval(self, a):
            return tf.nn.depth_to_space(a, self.block_shape, name=self.result_name)
951
    class OneHot:
        """Network: tf.one_hot with fixed depth and axis.

        All arguments are passed positionally; the output dtype is taken
        from on_value, and the final positional argument is the op name.
        """

        def __init__(self, depth, axis, name):
            self.depth = depth
            self.axis = axis
            self.result_name = name

        def eval(self, indices, on_value, off_value):
            return tf.one_hot(
                indices,
                self.depth,
                on_value,
                off_value,
                self.axis,
                on_value.dtype,
                self.result_name,
            )
968
    class Fakequant:
        """Network: fake_quant_with_min_max_args with a fixed [-2, 2] range
        and configurable bit width / narrow_range."""

        def __init__(self, num_bits, narrow_range, name):
            self.num_bits = num_bits
            self.narrow_range = narrow_range
            self.result_name = name

        def eval(self, a):
            return tf.quantization.fake_quant_with_min_max_args(
                a,
                min=-2.0,
                max=2.0,
                num_bits=self.num_bits,
                narrow_range=self.narrow_range,
                name=self.result_name,
            )
984
985 class ResizeNearest:
986 def __init__(self, name):
987 self.result_name = name
988
989 def eval(self, a):
990 out_shape = []
991 out_shape.append(a.shape[1] * 2)
992 out_shape.append(a.shape[2] * 2)
993
994 # tf.image.resize() will overwrite the node name with result_name +
995 # '/BILINEAR' need to add extra identity to force output tensor name to
996 # result_name return tf.image.resize(a, out_shape,
997 # method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, name=result_name)
998 resize = tf.image.resize(
999 a,
1000 out_shape,
1001 method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
1002 name="resize",
1003 )
1004 return tf.identity(resize, name=self.result_name)
1005
1006 class ResizeBilinear:
1007 def __init__(self, name):
1008 self.result_name = name
1009
1010 def eval(self, a):
1011 out_shape = []
1012 out_shape.append(a.shape[1] * 2)
1013 out_shape.append(a.shape[2] * 2)
1014
1015 # tf.image.resize() will overwrite the node name with result_name +
1016 # '/BILINEAR' need to add extra identity to force output tensor name to
1017 # result_name return tf.image.resize(a, out_shape,
1018 # method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, name=result_name)
1019 resize = tf.image.resize(
1020 a, out_shape, method=tf.image.ResizeMethod.BILINEAR, name="resize"
1021 )
1022 return tf.identity(resize, name=self.result_name)
1023
TatWai Chongf7326092022-06-08 12:17:14 -07001024 # New tf resize set (align_corners, half_pixel_centers) = (false, true) by default.
1025 # Test the rest option combinations here.
1026 # Note that (align_corners, half_pixel_centers) = (true, true) is NOT valid.
    class ResizeBilinearV1AlignCorners:
        """Network: 2x bilinear upscale via the v1 resize_bilinear op with
        (align_corners, half_pixel_centers) = (True, False); identity forces
        the output tensor name."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            out_shape = []
            out_shape.append(a.shape[1] * 2)
            out_shape.append(a.shape[2] * 2)

            resize = tf.compat.v1.image.resize_bilinear(
                a,
                out_shape,
                align_corners=True,
                name="resize",
                half_pixel_centers=False,
            )
            return tf.identity(resize, name=self.result_name)
1044
    class ResizeBilinearV1None:
        """Network: 2x bilinear upscale via the v1 resize_bilinear op with
        (align_corners, half_pixel_centers) = (False, False); identity forces
        the output tensor name."""

        def __init__(self, name):
            self.result_name = name

        def eval(self, a):
            out_shape = []
            out_shape.append(a.shape[1] * 2)
            out_shape.append(a.shape[2] * 2)

            resize = tf.compat.v1.image.resize_bilinear(
                a,
                out_shape,
                align_corners=False,
                name="resize",
                half_pixel_centers=False,
            )
            return tf.identity(resize, name=self.result_name)
1062
    class LeftShift:
        """Network: bitwise left shift by a fixed amount."""

        def __init__(self, shift, name):
            self.shift = shift
            self.result_name = name

        def eval(self, a):
            return tf.bitwise.left_shift(a, self.shift, name=self.result_name)
1070
    class RightShift:
        """Network: bitwise right shift by a fixed amount."""

        def __init__(self, shift, name):
            self.shift = shift
            self.result_name = name

        def eval(self, a):
            return tf.bitwise.right_shift(a, self.shift, name=self.result_name)