/// Copyright (c) 2021 ARM Limited and Contributors. All rights reserved.
///
/// SPDX-License-Identifier: MIT
///

namespace armnn
{
/**
@page operator_list Arm NN Operators

@tableofcontents

@section S5_1_operator_list Arm NN Operators

Arm NN supports the operators listed in the table below.

Arm NN supports a wide range of data types.
The main data types supported by the machine learning functions are the following (a short usage sketch follows the list):
    <ul>
     <li><b>BFLOAT16:</b> 16-bit non-standard brain floating point
     <li><b>QASYMMU8:</b> 8-bit unsigned asymmetric quantized
     <li><b>QASYMMS8:</b> 8-bit signed asymmetric quantized
     <li><b>QUANTIZEDSYMM8PERAXIS:</b> 8-bit signed symmetric quantized, per axis
     <li><b>QSYMMS8:</b> 8-bit signed symmetric quantized
     <li><b>QSYMMS16:</b> 16-bit signed symmetric quantized
     <li><b>FLOAT32:</b> 32-bit single precision floating point
     <li><b>FLOAT16:</b> 16-bit half precision floating point
     <li><b>SIGNED32:</b> 32-bit signed integer
     <li><b>BOOLEAN:</b> 8-bit unsigned char
     <li><b>All:</b> Agnostic to any specific data type
    </ul>
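
For illustration, these names map onto the armnn::DataType enumeration used when describing tensors. The following is a minimal, non-normative sketch; the shape, scale and offset values are purely illustrative:

@code{.cpp}
// Minimal sketch: describing tensors with two of the data types listed above.
#include <armnn/ArmNN.hpp>

void DescribeTensors()
{
    // FLOAT32 tensor of shape {1, 224, 224, 3}
    armnn::TensorInfo floatInfo({1, 224, 224, 3}, armnn::DataType::Float32);

    // QASYMMU8 tensor: real value = scale * (quantized value - offset)
    armnn::TensorInfo quantInfo({1, 224, 224, 3},
                                armnn::DataType::QAsymmU8,
                                0.02f,  // quantization scale (illustrative)
                                128);   // quantization offset (illustrative)
}
@endcode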

Arm NN supports the following data layouts (the fastest-changing dimension is the rightmost):
    <ul>
     <li><b>NHWC:</b> Layout where channels are in the fastest changing dimension
     <li><b>NCHW:</b> Layout where width is in the fastest changing dimension
     <li><b>All:</b> Agnostic to any specific data layout
    </ul>
where N = batches, C = channels, H = height, W = width. A minimal sketch of selecting a layout follows.
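
Layout-sensitive layers select their layout through the m_DataLayout member of the corresponding descriptor. A minimal sketch (the pooling parameters are illustrative values only):

@code{.cpp}
// Minimal sketch: choosing NHWC on a layout-sensitive descriptor.
#include <armnn/ArmNN.hpp>

armnn::Pooling2dDescriptor MakeMaxPoolDescriptor()
{
    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType   = armnn::PoolingAlgorithm::Max;
    desc.m_PoolWidth  = 2;
    desc.m_PoolHeight = 2;
    desc.m_StrideX    = 2;
    desc.m_StrideY    = 2;
    // NHWC: channels are the fastest changing dimension.
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    return desc;
}
@endcode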

<table>
<caption id="multi_row"></caption>
<tr>
    <th>Operator
    <th>Description
    <th>Equivalent Android NNAPI Operator
    <th>Backends
    <th>Data Layouts
    <th>Data Types
<tr>
    <td rowspan="3">AbsLayer
    <td rowspan="3"> Layer to perform absolute operation.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_ABS
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ActivationLayer
    <td rowspan="3" style="width:200px;"> Layer to simulate an activation layer with the specified activation function.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_ABS
         <li>ANEURALNETWORKS_ELU
         <li>ANEURALNETWORKS_HARD_SWISH
         <li>ANEURALNETWORKS_LOGISTIC
         <li>ANEURALNETWORKS_PRELU
         <li>ANEURALNETWORKS_RELU
         <li>ANEURALNETWORKS_RELU1
         <li>ANEURALNETWORKS_RELU6
         <li>ANEURALNETWORKS_SQRT
         <li>ANEURALNETWORKS_TANH
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">AdditionLayer
    <td rowspan="3" style="width:200px;"> Layer to add 2 tensors.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_ADD
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ArgMinMaxLayer
    <td rowspan="3" style="width:200px;"> Layer to calculate the index of the minimum or maximum values in a tensor based on an axis.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_ARGMAX
         <li>ANEURALNETWORKS_ARGMIN
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>SIGNED64
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">BatchMatMulLayer
    <td rowspan="3" style="width:200px;"> Layer to perform batch matrix multiplication.
    <td rowspan="3">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>N/A
        </ul>
    <td>
        <ul>
         <li>N/A
        </ul>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>N/A
        </ul>
    <td>
        <ul>
         <li>N/A
        </ul>
<tr>
    <td rowspan="3">BatchNormalizationLayer
    <td rowspan="3" style="width:200px;"> Layer to perform batch normalization.
    <td rowspan="3">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT32
            <tr><td>FLOAT16
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT32
            <tr><td>FLOAT16
        </table>
<tr>
    <td rowspan="3">BatchToSpaceNdLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a batch to space transformation.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_BATCH_TO_SPACE_ND
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">CastLayer
    <td rowspan="3" style="width:200px;"> Layer to cast a tensor to a type.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_CAST
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QSYMMS8
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>FLOAT16
            <tr><td>SIGNED32
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ChannelShuffleLayer
    <td rowspan="3" style="width:200px;"> Layer to reorganize the channels of a tensor.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_CHANNEL_SHUFFLE
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QSYMMS8
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ComparisonLayer
    <td rowspan="3" style="width:200px;"> Layer to compare 2 tensors.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_EQUAL
         <li>ANEURALNETWORKS_GREATER
         <li>ANEURALNETWORKS_GREATER_EQUAL
         <li>ANEURALNETWORKS_LESS
         <li>ANEURALNETWORKS_LESS_EQUAL
         <li>ANEURALNETWORKS_NOT_EQUAL
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>BOOLEAN
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">ConcatLayer
    <td rowspan="3" style="width:200px;"> Layer to concatenate tensors along a given axis.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_CONCATENATION
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ConstantLayer
    <td rowspan="3" style="width:200px;"> Layer to provide a constant tensor.
    <td rowspan="3">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">ConvertBf16ToFp32Layer
    <td rowspan="3" style="width:200px;"> Layer to convert BFloat16 tensor to Float32 tensor.
    <td rowspan="3">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ConvertFp16ToFp32Layer
    <td rowspan="3" style="width:200px;"> Layer to convert Float16 tensor to Float32 tensor.
    <td rowspan="3">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ConvertFp32ToBf16Layer
    <td rowspan="3" style="width:200px;"> Layer to convert Float32 tensor to BFloat16 tensor.
    <td rowspan="3">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ConvertFp32ToFp16Layer
    <td rowspan="3" style="width:200px;"> Layer to convert Float32 tensor to Float16 tensor.
    <td rowspan="3">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">Convolution2dLayer
    <td rowspan="3" style="width:200px;"> Layer to compute a convolution operation.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_CONV_2D
         <li>ANEURALNETWORKS_GROUPED_CONV_2D
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QUANTIZEDSYMM8PERAXIS
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QUANTIZEDSYMM8PERAXIS
        </table>
<tr>
    <td rowspan="3">Convolution3dLayer
    <td rowspan="3" style="width:200px;"> Layer to compute a 3D convolution operation.
    <td rowspan="3">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>NDHWC
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>N/A
        </ul>
    <td>
        <ul>
         <li>N/A
        </ul>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>N/A
        </ul>
    <td>
        <ul>
         <li>N/A
        </ul>
<tr>
    <td rowspan="1">DebugLayer
    <td rowspan="1" style="width:200px;"> Layer to print out inter layer tensor information.
    <td rowspan="1">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td rowspan="3">DepthToSpaceLayer
    <td rowspan="3" style="width:200px;"> Layer to perform Depth to Space transformation.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_DEPTH_TO_SPACE
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">DepthwiseConvolution2dLayer
    <td rowspan="3" style="width:200px;"> Layer to compute a depthwise convolution operation.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_DEPTHWISE_CONV_2D
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QUANTIZEDSYMM8PERAXIS
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QUANTIZEDSYMM8PERAXIS
        </table>
<tr>
    <td rowspan="3">DequantizeLayer
    <td rowspan="3" style="width:200px;"> Layer to dequantize the values in a tensor.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_DEQUANTIZE
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QUANTIZEDSYMM8PERAXIS
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QUANTIZEDSYMM8PERAXIS
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td rowspan="2">DetectionPostProcessLayer
    <td rowspan="2" style="width:200px;"> Layer to generate the detection output based on center size encoded boxes, class prediction and anchors by doing non maximum suppression (NMS).
    <td rowspan="2">
        <ul>
         <li>ANEURALNETWORKS_DETECTION_POSTPROCESSING
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">DivisionLayer
    <td rowspan="3" style="width:200px;"> Layer to divide 2 tensors.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_DIV
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ElementwiseBaseLayer
    <td rowspan="3" style="width:200px;"> Layer to perform Add - Div - Max - Min - Mul operations.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_ADD
         <li>ANEURALNETWORKS_DIV
         <li>ANEURALNETWORKS_MAXIMUM
         <li>ANEURALNETWORKS_MINIMUM
         <li>ANEURALNETWORKS_MUL
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ElementwiseUnaryLayer
    <td rowspan="3" style="width:200px;"> Layer to perform Rsqrt - Exp - Neg - Log - Abs - Sin - Sqrt operations.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_ABS
         <li>ANEURALNETWORKS_EXP
         <li>ANEURALNETWORKS_LOG
         <li>ANEURALNETWORKS_NEG
         <li>ANEURALNETWORKS_RSQRT
         <li>ANEURALNETWORKS_SIN
         <li>ANEURALNETWORKS_SQRT
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="1">FakeQuantizationLayer
    <td rowspan="1" style="width:200px;"> Layer to quantize float values and dequantize afterwards. The current implementation does not dequantize the values.
    <td rowspan="1">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">FillLayer
    <td rowspan="3" style="width:200px;"> Layer to set the values of a tensor with a given value.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_FILL
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">FloorLayer
    <td rowspan="3" style="width:200px;"> Layer to round the value to the lowest whole number.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_FLOOR
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT32
            <tr><td>FLOAT16
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT32
            <tr><td>FLOAT16
        </table>
<tr>
    <td rowspan="3">FullyConnectedLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a fully connected / dense operation.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_FULLY_CONNECTED
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
        </table>
<tr>
    <td rowspan="3">GatherLayer
    <td rowspan="3" style="width:200px;"> Layer to perform the gather operation along the chosen axis.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_GATHER
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">GatherNdLayer
    <td rowspan="3" style="width:200px;"> Layer to perform the gatherNd operation.
    <td rowspan="3">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td rowspan="1">InputLayer
    <td rowspan="1" style="width:200px;"> Special layer used to provide input data to the computational network.
    <td rowspan="1">
        <ul>
         <li>N/A
        </ul>
    <td>All
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">InstanceNormalizationLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an instance normalization on a given axis.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_INSTANCE_NORMALIZATION
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">L2NormalizationLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an L2 normalization on a given axis.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_L2_NORMALIZATION
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">LogSoftmaxLayer
    <td rowspan="3" style="width:200px;"> Layer to perform the log softmax activations given logits.
    <td rowspan="3">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">LogicalBinaryLayer
    <td rowspan="3" style="width:200px;"> Layer to perform Logical AND - Logical NOT - Logical OR operations.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_LOGICAL_AND
         <li>ANEURALNETWORKS_LOGICAL_NOT
         <li>ANEURALNETWORKS_LOGICAL_OR
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BOOLEAN
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BOOLEAN
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BOOLEAN
        </table>
<tr>
    <td rowspan="3">LstmLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a single time step in a Long Short-Term Memory (LSTM) operation.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_LSTM
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">MapLayer
    <td rowspan="3" style="width:200px;"> Layer to perform map operation on tensor.
    <td rowspan="3">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">MaximumLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an elementwise maximum of two tensors.
    <td rowspan="3">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
        </table>
<tr>
    <td rowspan="3">MeanLayer
    <td rowspan="3" style="width:200px;"> Layer to perform reduce mean operation.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_MEAN
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">MemCopyLayer
    <td rowspan="3" style="width:200px;"> Layer to perform memory copy operation.
    <td rowspan="3">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>BOOLEAN
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">MemImportLayer
    <td rowspan="3" style="width:200px;"> Layer to perform memory import operation.
    <td rowspan="3">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">MergeLayer
    <td rowspan="3" style="width:200px;"> Layer to concatenate tensors along a given axis.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_CONCATENATION
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">MinimumLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an elementwise minimum of two tensors.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_MINIMUM
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
        </table>
<tr>
    <td rowspan="3">MultiplicationLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an elementwise multiplication of two tensors.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_MUL
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">NormalizationLayer
    <td rowspan="3" style="width:200px;"> Layer to compute normalization operation.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT32
            <tr><td>FLOAT16
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT32
            <tr><td>FLOAT16
        </table>
<tr>
    <td rowspan="1">OutputLayer
    <td rowspan="1" style="width:200px;"> A special layer providing access to a user supplied buffer into which the output of a network can be written.
    <td rowspan="1">
        <ul>
         <li>N/A
        </ul>
    <td>All
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">PadLayer
    <td rowspan="3" style="width:200px;"> Layer to pad a tensor.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_PAD
         <li>ANEURALNETWORKS_PAD_V2
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">PermuteLayer
    <td rowspan="3" style="width:200px;"> Layer to transpose an ND tensor.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_TRANSPOSE
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">Pooling2dLayer
    <td rowspan="3" style="width:200px;"> Layer to perform 2D pooling with the specified pooling operation.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_AVERAGE_POOL_2D
         <li>ANEURALNETWORKS_L2_POOL_2D
         <li>ANEURALNETWORKS_MAX_POOL_2D
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">Pooling3dLayer
    <td rowspan="3" style="width:200px;"> Layer to perform 3D pooling with the specified pooling operation.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_AVERAGE_POOL_3D
         <li>ANEURALNETWORKS_L2_POOL_3D
         <li>ANEURALNETWORKS_MAX_POOL_3D
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>NDHWC
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>N/A
        </ul>
    <td>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>NDHWC
        </ul>
    <td>
<tr>
    <td rowspan="1">PreCompiledLayer
    <td rowspan="1" style="width:200px;"> Opaque layer provided by a backend which provides an executable representation of a subgraph from the original network.
    <td rowspan="1">
        <ul>
         <li>N/A
        </ul>
    <td>N/A
    <td>N/A
    <td>N/A
<tr>
    <td rowspan="3">PreluLayer
    <td rowspan="3" style="width:200px;"> Layer to compute the activation layer with the PRELU activation function.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_PRELU
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">QLstmLayer
    <td rowspan="3" style="width:200px;"> Layer to perform quantized LSTM (Long Short-Term Memory) operation.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_QUANTIZED_LSTM
         <li>ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>SIGNED32
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>SIGNED32
            <tr><td>QSYMMS16
        </table>
<tr>
    <td rowspan="3">QuantizeLayer
    <td rowspan="3" style="width:200px;"> Layer to perform quantization operation.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_QUANTIZE
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QASYMM16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QASYMM16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">QuantizedLstmLayer
    <td rowspan="3" style="width:200px;"> Layer to perform quantized LSTM (Long Short-Term Memory) operation.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_QUANTIZED_LSTM
         <li>ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>SIGNED32
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>SIGNED32
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td rowspan="3">RankLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a rank operation.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_RANK
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">ReduceLayer
    <td rowspan="3" style="width:200px;"> Layer to perform reduce with the following operations - ARG_IDX_MAX: Index of the max value - ARG_IDX_MIN: Index of the min value - MEAN_SUM: Mean of sum - PROD: Product - SUM_SQUARE: Sum of squares - SUM: Sum - MIN: Min - MAX: Max
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_REDUCE_MAX
         <li>ANEURALNETWORKS_REDUCE_MIN
         <li>ANEURALNETWORKS_REDUCE_SUM
         <li>ANEURALNETWORKS_REDUCE_PROD
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
        </table>
<tr>
    <td rowspan="3">ReshapeLayer
    <td rowspan="3" style="width:200px;"> Layer to reshape a tensor.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_RESHAPE
         <li>ANEURALNETWORKS_SQUEEZE
         <li>ANEURALNETWORKS_EXPAND_DIMS
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>BOOLEAN
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">ResizeLayer
    <td rowspan="3" style="width:200px;"> Layer to perform resize of a tensor using one of the interpolation methods: - Bilinear - Nearest Neighbor.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_RESIZE_BILINEAR
         <li>ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">RsqrtLayer
    <td rowspan="3" style="width:200px;"> Layer to perform Rsqrt operation.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_RSQRT
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ShapeLayer
    <td rowspan="3" style="width:200px;"> Layer to return the shape of the input tensor.
    <td rowspan="3">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">SliceLayer
    <td rowspan="3" style="width:200px;"> Layer to perform tensor slicing.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_SLICE
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">SoftmaxLayer
    <td rowspan="3" style="width:200px;"> Layer to perform softmax, log-softmax operation over the specified axis.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_LOG_SOFTMAX
         <li>ANEURALNETWORKS_SOFTMAX
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">SpaceToBatchNdLayer
    <td rowspan="3" style="width:200px;"> Layer to divide spatial dimensions of the tensor into a grid of blocks and interleaves these blocks with the batch dimension.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_SPACE_TO_BATCH_ND
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">SpaceToDepthLayer
    <td rowspan="3" style="width:200px;"> Layer to rearrange blocks of spatial data into depth.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_SPACE_TO_DEPTH
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">SplitterLayer
    <td rowspan="3" style="width:200px;"> Layer to split a tensor along a given axis.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_SPLIT
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">StackLayer
    <td rowspan="3" style="width:200px;"> Layer to stack tensors along an axis.
    <td rowspan="3">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="1">StandInLayer
    <td rowspan="1" style="width:200px;"> A layer to represent "unknown" or "unsupported" operations in the input graph. It has a configurable number of input and output slots and an optional name.
    <td rowspan="1">
        <ul>
         <li>N/A
        </ul>
    <td>N/A
    <td>N/A
    <td>N/A
<tr>
    <td rowspan="3">StridedSliceLayer
    <td rowspan="3" style="width:200px;"> Layer to extract a strided slice of a tensor.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_STRIDED_SLICE
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">SubtractionLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an elementwise subtract of 2 tensors.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_SUB
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">TransposeConvolution2dLayer
    <td rowspan="3" style="width:200px;"> Layer to perform 2D transpose convolution (deconvolution) operation.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_TRANSPOSE_CONV_2D
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QUANTIZEDSYMM8PERAXIS
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QUANTIZEDSYMM8PERAXIS
        </table>
<tr>
    <td rowspan="3">TransposeLayer
    <td rowspan="3" style="width:200px;"> Layer to transpose a tensor.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_TRANSPOSE
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">UnidirectionalSequenceLstmLayer
    <td rowspan="3" style="width:200px;"> Layer to perform unidirectional sequence LSTM operation.
    <td rowspan="3">
        <ul>
         <li>ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>Input Types
            <tr><td>FLOAT32
        </table>
        <table>
        <tr><th>Weight Types
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>Input Types
            <tr><td>FLOAT32
        </table>
        <table>
        <tr><th>Weight Types
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>Input Types
            <tr><td>FLOAT32
        </table>
        <table>
        <tr><th>Weight Types
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">UnmapLayer
    <td rowspan="3" style="width:200px;"> Layer to perform unmap operation on tensor.
    <td rowspan="3">
        <ul>
         <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
         <li>All
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
         <li>NHWC
         <li>NCHW
        </ul>
    <td>
        <table>
        <tr><th>
            <tr><td>All
        </table>
</table>

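Each row of the table corresponds to an INetwork::Add*Layer call, and the Backends column to the backend preference list passed to Optimize(). The following is a minimal, non-normative sketch, assuming a standard Arm NN build where the listed backends are compiled in; the AdditionLayer from the table is used, and the tensor shape and backend ordering are illustrative only:

@code{.cpp}
// Minimal sketch: build a one-operator network and pick backends with fallback.
#include <armnn/ArmNN.hpp>

void BuildAndOptimize()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::IConnectableLayer* input0 = network->AddInputLayer(0);
    armnn::IConnectableLayer* input1 = network->AddInputLayer(1);
    armnn::IConnectableLayer* add    = network->AddAdditionLayer();
    armnn::IConnectableLayer* output = network->AddOutputLayer(0);

    input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    armnn::TensorInfo info({1, 4}, armnn::DataType::Float32);
    input0->GetOutputSlot(0).SetTensorInfo(info);
    input1->GetOutputSlot(0).SetTensorInfo(info);
    add->GetOutputSlot(0).SetTensorInfo(info);

    // Backends are tried in order; layers unsupported by CpuAcc fall back to CpuRef.
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*network,
                        {armnn::Compute::CpuAcc, armnn::Compute::CpuRef},
                        runtime->GetDeviceSpec());
}
@endcode
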
*/
} // namespace