/// Copyright (c) 2021 ARM Limited and Contributors. All rights reserved.
///
/// SPDX-License-Identifier: MIT
///

namespace armnn
{
/**
@page operator_list Arm NN Operators

@tableofcontents

@section S5_1_operator_list Arm NN Operators

Arm NN supports the operators listed in the table below.

Arm NN supports a wide range of data types. The main data types that the Machine Learning functions support
are the following (a short sketch of how a data type is attached to a tensor follows this list):
 <ul>
   <li><b>BFLOAT16:</b> 16-bit non-standard brain floating point
   <li><b>QASYMMU8:</b> 8-bit unsigned asymmetric quantized
   <li><b>QASYMMS8:</b> 8-bit signed asymmetric quantized
   <li><b>QUANTIZEDSYMM8PERAXIS:</b> 8-bit signed symmetric quantized, per axis
   <li><b>QSYMMS8:</b> 8-bit signed symmetric quantized
   <li><b>QSYMMS16:</b> 16-bit signed symmetric quantized
   <li><b>FLOAT32:</b> 32-bit single precision floating point
   <li><b>FLOAT16:</b> 16-bit half precision floating point
   <li><b>SIGNED32:</b> 32-bit signed integer
   <li><b>BOOLEAN:</b> 8-bit unsigned char
   <li><b>All:</b> Agnostic to any specific data type
 </ul>
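As a minimal illustrative sketch (the shape and quantization values below are arbitrary examples, not taken
from this table), a tensor is bound to one of these data types through its armnn::TensorInfo; quantized types
additionally carry a scale and offset:

@code{.cpp}
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

// FLOAT32 tensor: shape {1, 224, 224, 3}, no quantization parameters required.
armnn::TensorInfo floatInfo({1, 224, 224, 3}, armnn::DataType::Float32);

// QASYMMU8 tensor: 8-bit unsigned asymmetric quantized, where
// realValue = scale * (quantizedValue - offset).
armnn::TensorInfo quantInfo({1, 224, 224, 3}, armnn::DataType::QAsymmU8,
                            /*quantizationScale=*/0.0078125f,
                            /*quantizationOffset=*/128);
@endcode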

Arm NN supports the following data layouts (fastest changing dimension from right to left):
 <ul>
   <li><b>NHWC:</b> Layout where channels are in the fastest changing dimension
   <li><b>NCHW:</b> Layout where width is in the fastest changing dimension
   <li><b>All:</b> Agnostic to any specific data layout
 </ul>
where N = batches, C = channels, H = height, W = width. A sketch of selecting a layout follows.
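As an illustrative sketch (Convolution2d is an arbitrary choice here; any layer whose descriptor has an
m_DataLayout member works the same way), the layout a layer assumes is selected per descriptor:

@code{.cpp}
#include <armnn/Descriptors.hpp>
#include <armnn/Types.hpp>

armnn::Convolution2dDescriptor convDesc;
convDesc.m_StrideX = 1;
convDesc.m_StrideY = 1;
// Interpret the input/output tensors as NHWC; armnn::DataLayout::NCHW is the alternative.
convDesc.m_DataLayout = armnn::DataLayout::NHWC;
@endcode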
41<table>
42<caption id="multi_row"></caption>
43<tr>
44 <th>Operator
45 <th>Description
46 <th>Equivalent Android NNAPI Operator
47 <th>Backends
48 <th>Data Layouts
49 <th>Data Types
50<tr>
51 <td rowspan="3">AbsLayer
    <td rowspan="3"> Layer to perform an absolute operation.
53 <td rowspan="3">
54 <ul>
55 <li>ANEURALNETWORKS_ABS
56 </ul>
57 <td>CpuRef
58 <td>
59 <ul>
60 <li>All
61 </ul>
62 <td>
63 <table>
64 <tr><th>
65 <tr><td>BFLOAT16
66 <tr><td>FLOAT16
67 <tr><td>FLOAT32
68 <tr><td>QASYMMS8
69 <tr><td>QASYMMU8
70 <tr><td>QSYMMS16
71 <tr><td>SIGNED32
72 </table>
73<tr>
74 <td>CpuAcc
75 <td>
76 <ul>
77 <li>All
78 </ul>
79 <td>
80 <table>
81 <tr><th>
82 <tr><td>FLOAT16
83 <tr><td>FLOAT32
84 <tr><td>SIGNED32
85 </table>
86<tr>
87 <td>GpuAcc
88 <td>
89 <ul>
90 <li>All
91 </ul>
92 <td>
93 <table>
94 <tr><th>
95 <tr><td>FLOAT16
96 <tr><td>FLOAT32
97 </table>
98<tr>
    <td rowspan="3">ActivationLayer
    <td rowspan="3" style="width:200px;"> Layer to apply the specified activation function.
101 <td rowspan="3">
102 <ul>
103 <li>ANEURALNETWORKS_ABS
104 <li>ANEURALNETWORKS_ELU
105 <li>ANEURALNETWORKS_HARD_SWISH
106 <li>ANEURALNETWORKS_LOGISTIC
107 <li>ANEURALNETWORKS_PRELU
108 <li>ANEURALNETWORKS_RELU
109 <li>ANEURALNETWORKS_RELU1
110 <li>ANEURALNETWORKS_RELU6
111 <li>ANEURALNETWORKS_SQRT
112 <li>ANEURALNETWORKS_TANH
113 </ul>
114 <td>CpuRef
115 <td>
116 <ul>
117 <li>All
118 </ul>
119 <td>
120 <table>
121 <tr><th>
122 <tr><td>BFLOAT16
123 <tr><td>FLOAT16
124 <tr><td>FLOAT32
125 <tr><td>QASYMMS8
126 <tr><td>QASYMMU8
127 <tr><td>QSYMMS16
128 </table>
129<tr>
130 <td>CpuAcc
131 <td>
132 <ul>
133 <li>All
134 </ul>
135 <td>
136 <table>
137 <tr><th>
138 <tr><td>QASYMMU8
139 <tr><td>QASYMMS8
140 <tr><td>QSYMMS16
141 <tr><td>FLOAT16
142 <tr><td>FLOAT32
143 </table>
144<tr>
145 <td>GpuAcc
146 <td>
147 <ul>
148 <li>All
149 </ul>
150 <td>
151 <table>
152 <tr><th>
153 <tr><td>QASYMMU8
154 <tr><td>QASYMMS8
155 <tr><td>QSYMMS16
156 <tr><td>FLOAT16
157 <tr><td>FLOAT32
158 </table>
159<tr>
160 <td rowspan="3">AdditionLayer
161 <td rowspan="3" style="width:200px;"> Layer to add 2 tensors.
162 <td rowspan="3">
163 <ul>
164 <li>ANEURALNETWORKS_ADD
165 </ul>
166 <td>CpuRef
167 <td>
168 <ul>
169 <li>All
170 </ul>
171 <td>
172 <table>
173 <tr><th>
174 <tr><td>BFLOAT16
175 <tr><td>FLOAT16
176 <tr><td>FLOAT32
177 <tr><td>QASYMMS8
178 <tr><td>QASYMMU8
179 <tr><td>QSYMMS16
180 <tr><td>SIGNED32
181 </table>
182<tr>
183 <td>CpuAcc
184 <td>
185 <ul>
186 <li>All
187 </ul>
188 <td>
189 <table>
190 <tr><th>
191 <tr><td>QASYMMU8
192 <tr><td>QASYMMS8
193 <tr><td>QSYMMS16
194 <tr><td>SIGNED32
195 <tr><td>FLOAT16
196 <tr><td>FLOAT32
197 </table>
198<tr>
199 <td>GpuAcc
200 <td>
201 <ul>
202 <li>All
203 </ul>
204 <td>
205 <table>
206 <tr><th>
207 <tr><td>QASYMMU8
208 <tr><td>QASYMMS8
209 <tr><td>QSYMMS16
210 <tr><td>SIGNED32
211 <tr><td>FLOAT16
212 <tr><td>FLOAT32
213 </table>
214<tr>
215 <td rowspan="3">ArgMinMaxLayer
216 <td rowspan="3" style="width:200px;"> Layer to calculate the index of the minimum or maximum values in a tensor
217 based on an axis.
218 <td rowspan="3">
219 <ul>
220 <li>ANEURALNETWORKS_ARGMAX
221 <li>ANEURALNETWORKS_ARGMIN
222 </ul>
223 <td>CpuRef
224 <td>
225 <ul>
226 <li>All
227 </ul>
228 <td>
229 <table>
230 <tr><th>
231 <tr><td>BFLOAT16
232 <tr><td>FLOAT16
233 <tr><td>FLOAT32
234 <tr><td>QASYMMS8
235 <tr><td>QASYMMU8
236 <tr><td>QSYMMS16
237 <tr><td>SIGNED32
238 <tr><td>SIGNED64
239 </table>
240<tr>
241 <td>CpuAcc
242 <td>
243 <ul>
244 <li>All
245 </ul>
246 <td>
247 <table>
248 <tr><th>
249 <tr><td>QASYMMU8
250 <tr><td>QASYMMS8
251 <tr><td>SIGNED32
252 <tr><td>FLOAT16
253 <tr><td>FLOAT32
254 </table>
255<tr>
256 <td>GpuAcc
257 <td>
258 <ul>
259 <li>All
260 </ul>
261 <td>
262 <table>
263 <tr><th>
264 <tr><td>QASYMMU8
265 <tr><td>QASYMMS8
266 <tr><td>SIGNED32
267 <tr><td>FLOAT16
268 <tr><td>FLOAT32
269 </table>
270<tr>
    <td rowspan="3">BatchMatMulLayer
272 <td rowspan="3" style="width:200px;"> Layer to perform batch matrix multiplication.
273 <td rowspan="3">
274 <ul>
275 <li>N/A
276 </ul>
277 <td>CpuRef
278 <td>
279 <ul>
280 <li>All
281 </ul>
282 <td>
283 <table>
284 <tr><th>
285 <tr><td>BFLOAT16
286 <tr><td>FLOAT16
287 <tr><td>FLOAT32
288 <tr><td>QASYMMS8
289 <tr><td>QASYMMU8
290 <tr><td>QSYMMS16
291 </table>
292<tr>
293 <td>CpuAcc
294 <td>
295 <ul>
     <li>All
    </ul>
    <td>
    <table>
300 <tr><th>
301 <tr><td>FLOAT32
302 </table>
<tr>
304 <td>GpuAcc
305 <td>
306 <ul>
307 <li>N/A
308 </ul>
309 <td>
310 <ul>
311 <li>N/A
312 </ul>
313<tr>
    <td rowspan="3">BatchNormalizationLayer
315 <td rowspan="3" style="width:200px;"> Layer to perform batch normalization.
316 <td rowspan="3">
317 <ul>
318 <li>N/A
319 </ul>
320 <td>CpuRef
321 <td>
322 <ul>
323 <li>All
324 </ul>
325 <td>
326 <table>
327 <tr><th>
328 <tr><td>BFLOAT16
329 <tr><td>FLOAT16
330 <tr><td>FLOAT32
331 <tr><td>QASYMMS8
332 <tr><td>QASYMMU8
333 <tr><td>QSYMMS16
334 </table>
335<tr>
336 <td>CpuAcc
337 <td>
338 <ul>
339 <li>NHWC
340 <li>NCHW
341 </ul>
342 <td>
343 <table>
344 <tr><th>
345 <tr><td>FLOAT32
346 <tr><td>FLOAT16
347 </table>
348<tr>
349 <td>GpuAcc
350 <td>
351 <ul>
352 <li>NHWC
353 <li>NCHW
354 </ul>
355 <td>
356 <table>
357 <tr><th>
358 <tr><td>FLOAT32
359 <tr><td>FLOAT16
360 </table>
361<tr>
362 <td rowspan="3">BatchToSpaceNdLayer
363 <td rowspan="3" style="width:200px;"> Layer to perform a batch to space transformation.
364 <td rowspan="3">
365 <ul>
366 <li>ANEURALNETWORKS_BATCH_TO_SPACE_ND
367 </ul>
368 <td>CpuRef
369 <td>
370 <ul>
371 <li>All
372 </ul>
373 <td>
374 <table>
375 <tr><th>
376 <tr><td>BFLOAT16
377 <tr><td>FLOAT16
378 <tr><td>FLOAT32
379 <tr><td>QASYMMS8
380 <tr><td>QASYMMU8
381 <tr><td>QSYMMS16
382 </table>
383<tr>
384 <td>CpuAcc
385 <td>
386 <ul>
387 <li>NHWC
388 <li>NCHW
389 </ul>
390 <td>
391 <table>
392 <tr><th>
393 <tr><td>All
394 </table>
395<tr>
396 <td>GpuAcc
397 <td>
398 <ul>
399 <li>NHWC
400 <li>NCHW
401 </ul>
402 <td>
403 <table>
404 <tr><th>
405 <tr><td>All
406 </table>
407<tr>
408 <td rowspan="3">CastLayer
409 <td rowspan="3" style="width:200px;"> Layer to cast a tensor to a type.
410 <td rowspan="3">
411 <ul>
412 <li>ANEURALNETWORKS_CAST
413 </ul>
414 <td>CpuRef
415 <td>
416 <ul>
417 <li>All
418 </ul>
419 <td>
420 <table>
421 <tr><th>
422 <tr><td>BFLOAT16
423 <tr><td>FLOAT16
424 <tr><td>FLOAT32
425 <tr><td>QSYMMS8
426 <tr><td>QASYMMS8
427 <tr><td>QASYMMU8
428 <tr><td>QSYMMS16
429 <tr><td>SIGNED32
430 </table>
431<tr>
432 <td>CpuAcc
433 <td>
434 <ul>
435 <li>All
436 </ul>
437 <td>
438 <table>
439 <tr><th>
440 <tr><td>QASYMMS8
441 <tr><td>QASYMMU8
442 <tr><td>FLOAT16
443 <tr><td>SIGNED32
444 <tr><td>FLOAT32
445 </table>
446<tr>
447 <td>GpuAcc
448 <td>
449 <ul>
450 <li>All
451 </ul>
452 <td>
453 <table>
454 <tr><th>
455 <tr><td>QASYMMS8
456 <tr><td>QASYMMU8
457 <tr><td>SIGNED32
458 <tr><td>FLOAT16
459 <tr><td>FLOAT32
460 </table>
461<tr>
    <td rowspan="3">ChannelShuffleLayer
463 <td rowspan="3" style="width:200px;"> Layer to reorganize the channels of a tensor.
464 <td rowspan="3">
465 <ul>
466 <li>ANEURALNETWORKS_CHANNEL_SHUFFLE
467 </ul>
468 <td>CpuRef
469 <td>
470 <ul>
471 <li>All
472 </ul>
473 <td>
474 <table>
475 <tr><th>
476 <tr><td>FLOAT16
477 <tr><td>FLOAT32
478 <tr><td>QSYMMS8
479 <tr><td>QASYMMS8
480 <tr><td>QASYMMU8
481 </table>
482<tr>
483 <td>CpuAcc
484 <td>
485 <ul>
486 <li>All
487 </ul>
488 <td>
489 <table>
490 <tr><th>
491 <tr><td>QASYMMS8
492 <tr><td>QASYMMU8
493 <tr><td>FLOAT16
494 <tr><td>FLOAT32
495 </table>
496<tr>
497 <td>GpuAcc
498 <td>
499 <ul>
500 <li>All
501 </ul>
502 <td>
503 <table>
504 <tr><th>
505 <tr><td>QASYMMS8
506 <tr><td>QASYMMU8
507 <tr><td>FLOAT16
508 <tr><td>FLOAT32
509 </table>
510<tr>
    <td rowspan="3">ComparisonLayer
512 <td rowspan="3" style="width:200px;"> Layer to compare 2 tensors.
513 <td rowspan="3">
514 <ul>
515 <li>ANEURALNETWORKS_EQUAL
516 <li>ANEURALNETWORKS_GREATER
517 <li>ANEURALNETWORKS_GREATER_EQUAL
518 <li>ANEURALNETWORKS_LESS
519 <li>ANEURALNETWORKS_LESS_EQUAL
520 <li>ANEURALNETWORKS_NOT_EQUAL
521 </ul>
522 <td>CpuRef
523 <td>
524 <ul>
525 <li>All
526 </ul>
527 <td>
528 <table>
529 <tr><th>
530 <tr><td>BFLOAT16
531 <tr><td>FLOAT16
532 <tr><td>FLOAT32
533 <tr><td>BOOLEAN
534 <tr><td>QASYMMS8
535 <tr><td>QASYMMU8
536 <tr><td>QSYMMS16
537 <tr><td>SIGNED32
538 </table>
539<tr>
540 <td>CpuAcc
541 <td>
542 <ul>
543 <li>All
544 </ul>
545 <td>
546 <table>
547 <tr><th>
548 <tr><td>All
549 </table>
550<tr>
551 <td>GpuAcc
552 <td>
553 <ul>
554 <li>All
555 </ul>
556 <td>
557 <table>
558 <tr><th>
559 <tr><td>All
560 </table>
561<tr>
562 <td rowspan="3">ConcatLayer
563 <td rowspan="3" style="width:200px;"> Layer to concatenate tensors along a given axis.
564 <td rowspan="3">
565 <ul>
566 <li>ANEURALNETWORKS_CONCATENATION
567 </ul>
568 <td>CpuRef
569 <td>
570 <ul>
571 <li>All
572 </ul>
573 <td>
574 <table>
575 <tr><th>
576 <tr><td>BFLOAT16
577 <tr><td>FLOAT16
578 <tr><td>FLOAT32
579 <tr><td>QASYMMS8
580 <tr><td>QASYMMU8
581 <tr><td>QSYMMS16
582 </table>
583<tr>
584 <td>CpuAcc
585 <td>
586 <ul>
587 <li>All
588 </ul>
589 <td>
590 <table>
591 <tr><th>
592 <tr><td>QASYMMU8
593 <tr><td>QASYMMS8
594 <tr><td>FLOAT16
595 <tr><td>FLOAT32
596 </table>
597<tr>
598 <td>GpuAcc
599 <td>
600 <ul>
601 <li>All
602 </ul>
603 <td>
604 <table>
605 <tr><th>
606 <tr><td>QASYMMU8
607 <tr><td>QASYMMS8
608 <tr><td>FLOAT16
609 <tr><td>FLOAT32
610 </table>
611<tr>
612 <td rowspan="3">ConstantLayer
613 <td rowspan="3" style="width:200px;"> Layer to provide a constant tensor.
614 <td rowspan="3">
615 <ul>
616 <li>N/A
617 </ul>
618 <td>CpuRef
619 <td>
620 <ul>
621 <li>All
622 </ul>
623 <td>
624 <table>
625 <tr><th>
626 <tr><td>BFLOAT16
627 <tr><td>FLOAT16
628 <tr><td>FLOAT32
629 <tr><td>QASYMMS8
630 <tr><td>QASYMMU8
631 <tr><td>QSYMMS8
632 <tr><td>QSYMMS16
633 <tr><td>SIGNED32
634 </table>
635<tr>
636 <td>CpuAcc
637 <td>
638 <ul>
639 <li>All
640 </ul>
641 <td>
642 <table>
643 <tr><th>
644 <tr><td>All
645 </table>
646<tr>
647 <td>GpuAcc
648 <td>
649 <ul>
650 <li>All
651 </ul>
652 <td>
653 <table>
654 <tr><th>
655 <tr><td>All
656 </table>
657<tr>
658 <td rowspan="3">ConvertBf16ToFp32Layer
    <td rowspan="3" style="width:200px;"> Layer to convert a BFloat16 tensor to a Float32 tensor.
660 <td rowspan="3">
661 <ul>
662 <li>N/A
663 </ul>
664 <td>CpuRef
665 <td>
666 <ul>
667 <li>All
668 </ul>
669 <td>
670 <table>
671 <tr><th>
672 <tr><td>BFLOAT16
673 <tr><td>FLOAT32
674 </table>
675<tr>
676 <td>CpuAcc
677 <td>
678 <ul>
679 <li>All
680 </ul>
681 <td>
682 <table>
683 <tr><th>
684 <tr><td>BFLOAT16
685 <tr><td>FLOAT32
686 </table>
687<tr>
688 <td>GpuAcc
689 <td>
690 <ul>
691 <li>All
692 </ul>
693 <td>
694 <table>
695 <tr><th>
696 <tr><td>BFLOAT16
697 <tr><td>FLOAT32
698 </table>
699<tr>
700 <td rowspan="3">ConvertFp16ToFp32Layer
    <td rowspan="3" style="width:200px;"> Layer to convert a Float16 tensor to a Float32 tensor.
702 <td rowspan="3">
703 <ul>
704 <li>N/A
705 </ul>
706 <td>CpuRef
707 <td>
708 <ul>
709 <li>All
710 </ul>
711 <td>
712 <table>
713 <tr><th>
714 <tr><td>FLOAT16
715 <tr><td>FLOAT32
716 </table>
717<tr>
718 <td>CpuAcc
719 <td>
720 <ul>
721 <li>All
722 </ul>
723 <td>
724 <table>
725 <tr><th>
726 <tr><td>FLOAT16
727 <tr><td>FLOAT32
728 </table>
729<tr>
730 <td>GpuAcc
731 <td>
732 <ul>
733 <li>All
734 </ul>
735 <td>
736 <table>
737 <tr><th>
738 <tr><td>FLOAT16
739 <tr><td>FLOAT32
740 </table>
741<tr>
742 <td rowspan="3">ConvertFp32ToBf16Layer
    <td rowspan="3" style="width:200px;"> Layer to convert a Float32 tensor to a BFloat16 tensor.
744 <td rowspan="3">
745 <ul>
746 <li>N/A
747 </ul>
748 <td>CpuRef
749 <td>
750 <ul>
751 <li>All
752 </ul>
753 <td>
754 <table>
755 <tr><th>
756 <tr><td>BFLOAT16
757 <tr><td>FLOAT32
758 </table>
759<tr>
760 <td>CpuAcc
761 <td>
762 <ul>
763 <li>All
764 </ul>
765 <td>
766 <table>
767 <tr><th>
768 <tr><td>BFLOAT16
769 <tr><td>FLOAT32
770 </table>
771<tr>
772 <td>GpuAcc
773 <td>
774 <ul>
775 <li>All
776 </ul>
777 <td>
778 <table>
779 <tr><th>
780 <tr><td>BFLOAT16
781 <tr><td>FLOAT32
782 </table>
783<tr>
784 <td rowspan="3">ConvertFp32ToFp16Layer
    <td rowspan="3" style="width:200px;"> Layer to convert a Float32 tensor to a Float16 tensor.
786 <td rowspan="3">
787 <ul>
788 <li>N/A
789 </ul>
790 <td>CpuRef
791 <td>
792 <ul>
793 <li>All
794 </ul>
795 <td>
796 <table>
797 <tr><th>
798 <tr><td>FLOAT16
799 <tr><td>FLOAT32
800 </table>
801<tr>
802 <td>CpuAcc
803 <td>
804 <ul>
805 <li>All
806 </ul>
807 <td>
808 <table>
809 <tr><th>
810 <tr><td>FLOAT16
811 <tr><td>FLOAT32
812 </table>
813<tr>
814 <td>GpuAcc
815 <td>
816 <ul>
817 <li>All
818 </ul>
819 <td>
820 <table>
821 <tr><th>
822 <tr><td>FLOAT16
823 <tr><td>FLOAT32
824 </table>
825<tr>
826 <td rowspan="3">Convolution2dLayer
827 <td rowspan="3" style="width:200px;"> Layer to compute a convolution operation.
828 <td rowspan="3">
829 <ul>
830 <li>ANEURALNETWORKS_CONV_2D
831 <li>ANEURALNETWORKS_GROUPED_CONV_2D
832 </ul>
833 <td>CpuRef
834 <td>
835 <ul>
836 <li>All
837 </ul>
838 <td>
839 <table>
840 <tr><th>
841 <tr><td>BFLOAT16
842 <tr><td>FLOAT16
843 <tr><td>FLOAT32
844 <tr><td>QASYMMS8
845 <tr><td>QASYMMU8
846 <tr><td>QSYMMS16
847 </table>
848<tr>
849 <td>CpuAcc
850 <td>
851 <ul>
852 <li>NHWC
853 <li>NCHW
854 </ul>
855 <td>
856 <table>
857 <tr><th>
858 <tr><td>SIGNED32
859 <tr><td>FLOAT16
860 <tr><td>FLOAT32
861 <tr><td>QASYMMU8
862 <tr><td>QASYMMS8
863 <tr><td>QUANTIZEDSYMM8PERAXIS
864 </table>
865<tr>
866 <td>GpuAcc
867 <td>
868 <ul>
869 <li>NHWC
870 <li>NCHW
871 </ul>
872 <td>
873 <table>
874 <tr><th>
875 <tr><td>SIGNED32
876 <tr><td>FLOAT16
877 <tr><td>FLOAT32
878 <tr><td>QASYMMU8
879 <tr><td>QASYMMS8
880 <tr><td>QUANTIZEDSYMM8PERAXIS
881 </table>
882<tr>
    <td rowspan="3">Convolution3dLayer
884 <td rowspan="3" style="width:200px;"> Layer to compute a 3D convolution operation.
885 <td rowspan="3">
886 <ul>
887 <li>N/A
888 </ul>
889 <td>CpuRef
890 <td>
891 <ul>
892 <li>NDHWC
893 </ul>
894 <td>
895 <table>
896 <tr><th>
897 <tr><td>BFLOAT16
898 <tr><td>FLOAT16
899 <tr><td>FLOAT32
900 <tr><td>QASYMMS8
901 <tr><td>QASYMMU8
902 <tr><td>QSYMMS8
903 <tr><td>QSYMMS16
904 </table>
905<tr>
906 <td>CpuAcc
907 <td>
908 <ul>
909 <li>N/A
910 </ul>
911 <td>
912 <ul>
913 <li>N/A
914 </ul>
915<tr>
916 <td>GpuAcc
917 <td>
918 <ul>
919 <li>N/A
920 </ul>
921 <td>
922 <ul>
923 <li>N/A
924 </ul>
925<tr>
    <td rowspan="1">DebugLayer
927 <td rowspan="1" style="width:200px;"> Layer to print out inter layer tensor information.
928 <td rowspan="1">
929 <ul>
930 <li>N/A
931 </ul>
932 <td>CpuRef
933 <td>
934 <ul>
935 <li>All
936 </ul>
937 <td>
938 <table>
939 <tr><th>
940 <tr><td>BFLOAT16
941 <tr><td>FLOAT16
942 <tr><td>FLOAT32
943 <tr><td>QASYMMS8
944 <tr><td>QASYMMU8
945 <tr><td>QSYMMS8
946 <tr><td>QSYMMS16
947 <tr><td>SIGNED32
948 </table>
949<tr>
950 <td rowspan="3">DepthToSpaceLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a Depth to Space transformation.
952 <td rowspan="3">
953 <ul>
954 <li>ANEURALNETWORKS_DEPTH_TO_SPACE
955 </ul>
956 <td>CpuRef
957 <td>
958 <ul>
959 <li>All
960 </ul>
961 <td>
962 <table>
963 <tr><th>
964 <tr><td>BFLOAT16
965 <tr><td>FLOAT16
966 <tr><td>FLOAT32
967 <tr><td>QASYMMS8
968 <tr><td>QASYMMU8
969 <tr><td>QSYMMS16
970 </table>
971<tr>
972 <td>CpuAcc
973 <td>
974 <ul>
975 <li>NHWC
976 <li>NCHW
977 </ul>
978 <td>
979 <table>
980 <tr><th>
981 <tr><td>All
982 </table>
983<tr>
984 <td>GpuAcc
985 <td>
986 <ul>
987 <li>NHWC
988 <li>NCHW
989 </ul>
990 <td>
991 <table>
992 <tr><th>
993 <tr><td>All
994 </table>
995<tr>
    <td rowspan="3">DepthwiseConvolution2dLayer
    <td rowspan="3" style="width:200px;"> Layer to compute a depthwise convolution operation.
998 <td rowspan="3">
999 <ul>
1000 <li>ANEURALNETWORKS_DEPTHWISE_CONV_2D
1001 </ul>
1002 <td>CpuRef
1003 <td>
1004 <ul>
1005 <li>All
1006 </ul>
1007 <td>
1008 <table>
1009 <tr><th>
1010 <tr><td>BFLOAT16
1011 <tr><td>FLOAT16
1012 <tr><td>FLOAT32
1013 <tr><td>QASYMMS8
1014 <tr><td>QASYMMU8
1015 <tr><td>QSYMMS8
1016 <tr><td>QSYMMS16
1017 </table>
1018<tr>
1019 <td>CpuAcc
1020 <td>
1021 <ul>
1022 <li>NHWC
1023 <li>NCHW
1024 </ul>
1025 <td>
1026 <table>
1027 <tr><th>
1028 <tr><td>FLOAT16
1029 <tr><td>FLOAT32
1030 <tr><td>SIGNED32
1031 <tr><td>QASYMMU8
1032 <tr><td>QASYMMS8
1033 <tr><td>QUANTIZEDSYMM8PERAXIS
1034 </table>
1035<tr>
1036 <td>GpuAcc
1037 <td>
1038 <ul>
1039 <li>NHWC
1040 <li>NCHW
1041 </ul>
1042 <td>
1043 <table>
1044 <tr><th>
1045 <tr><td>FLOAT16
1046 <tr><td>FLOAT32
1047 <tr><td>SIGNED32
1048 <tr><td>QASYMMU8
1049 <tr><td>QASYMMS8
1050 <tr><td>QUANTIZEDSYMM8PERAXIS
1051 </table>
1052<tr>
1053 <td rowspan="3">DequantizeLayer
1054 <td rowspan="3" style="width:200px;"> Layer to dequantize the values in a tensor.
1055 <td rowspan="3">
1056 <ul>
1057 <li>ANEURALNETWORKS_DEQUANTIZE
1058 </ul>
1059 <td>CpuRef
1060 <td>
1061 <ul>
1062 <li>All
1063 </ul>
1064 <td>
1065 <table>
1066 <tr><th>
1067 <tr><td>QASYMMS8
1068 <tr><td>QASYMMU8
1069 <tr><td>QSYMMS8
1070 <tr><td>QSYMMS16
1071 </table>
1072<tr>
1073 <td>CpuAcc
1074 <td>
1075 <ul>
1076 <li>All
1077 </ul>
1078 <td>
1079 <table>
1080 <tr><th>
1081 <tr><td>FLOAT16
1082 <tr><td>FLOAT32
1083 <tr><td>QASYMMU8
1084 <tr><td>QASYMMS8
1085 <tr><td>QUANTIZEDSYMM8PERAXIS
1086 <tr><td>QSYMMS8
1087 <tr><td>QSYMMS16
1088 </table>
1089<tr>
1090 <td>GpuAcc
1091 <td>
1092 <ul>
1093 <li>All
1094 </ul>
1095 <td>
1096 <table>
1097 <tr><th>
1098 <tr><td>FLOAT16
1099 <tr><td>FLOAT32
1100 <tr><td>QASYMMU8
1101 <tr><td>QASYMMS8
1102 <tr><td>QUANTIZEDSYMM8PERAXIS
1103 <tr><td>QSYMMS8
1104 <tr><td>QSYMMS16
1105 </table>
1106<tr>
1107 <td rowspan="2">DetectionPostProcessLayer
    <td rowspan="2" style="width:200px;"> Layer to generate the detection output from center-size encoded boxes, class predictions and anchors by applying non-maximum suppression (NMS).
1109 <td rowspan="2">
1110 <ul>
1111 <li>ANEURALNETWORKS_DETECTION_POSTPROCESSING
1112 </ul>
1113 <td>CpuRef
1114 <td>
1115 <ul>
1116 <li>All
1117 </ul>
1118 <td>
1119 <table>
1120 <tr><th>
1121 <tr><td>BFLOAT16
1122 <tr><td>FLOAT16
1123 <tr><td>FLOAT32
1124 <tr><td>QASYMMS8
1125 <tr><td>QASYMMU8
1126 <tr><td>QSYMMS16
1127 </table>
1128<tr>
1129 <td>CpuAcc
1130 <td>
1131 <ul>
1132 <li>All
1133 </ul>
1134 <td>
1135 <table>
1136 <tr><th>
1137 <tr><td>QASYMMU8
1138 <tr><td>QASYMMS8
1139 <tr><td>FLOAT32
1140 </table>
1141<tr>
1142 <td rowspan="3">DivisionLayer
1143 <td rowspan="3" style="width:200px;"> Layer to divide 2 tensors.
1144 <td rowspan="3">
1145 <ul>
1146 <li>ANEURALNETWORKS_DIV
1147 </ul>
1148 <td>CpuRef
1149 <td>
1150 <ul>
1151 <li>All
1152 </ul>
1153 <td>
1154 <table>
1155 <tr><th>
1156 <tr><td>BFLOAT16
1157 <tr><td>FLOAT16
1158 <tr><td>FLOAT32
1159 <tr><td>QASYMMS8
1160 <tr><td>QASYMMU8
1161 <tr><td>QSYMMS16
1162 <tr><td>SIGNED32
1163 </table>
1164<tr>
1165 <td>CpuAcc
1166 <td>
1167 <ul>
1168 <li>All
1169 </ul>
1170 <td>
1171 <table>
1172 <tr><th>
1173 <tr><td>FLOAT16
1174 <tr><td>FLOAT32
1175 </table>
1176<tr>
1177 <td>GpuAcc
1178 <td>
1179 <ul>
1180 <li>All
1181 </ul>
1182 <td>
1183 <table>
1184 <tr><th>
1185 <tr><td>FLOAT16
1186 <tr><td>FLOAT32
1187 </table>
1188<tr>
1189 <td rowspan="3">ElementwiseBaseLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an Add, Div, Max, Min or Mul operation.
1191 <td rowspan="3">
1192 <ul>
1193 <li>ANEURALNETWORKS_ADD
1194 <li>ANEURALNETWORKS_DIV
1195 <li>ANEURALNETWORKS_MAXIMUM
1196 <li>ANEURALNETWORKS_MINIMUM
1197 <li>ANEURALNETWORKS_MUL
1198 </ul>
1199 <td>CpuRef
1200 <td>
1201 <ul>
1202 <li>All
1203 </ul>
1204 <td>
1205 <table>
1206 <tr><th>
1207 <tr><td>BFLOAT16
1208 <tr><td>FLOAT16
1209 <tr><td>FLOAT32
1210 <tr><td>QASYMMS8
1211 <tr><td>QASYMMU8
1212 <tr><td>QSYMMS16
1213 <tr><td>SIGNED32
1214 </table>
1215<tr>
1216 <td>CpuAcc
1217 <td>
1218 <ul>
1219 <li>All
1220 </ul>
1221 <td>
1222 <table>
1223 <tr><th>
1224 <tr><td>QASYMMU8
1225 <tr><td>QASYMMS8
1226 <tr><td>QSYMMS16
1227 <tr><td>SIGNED32
1228 <tr><td>FLOAT16
1229 <tr><td>FLOAT32
1230 </table>
1231<tr>
1232 <td>GpuAcc
1233 <td>
1234 <ul>
1235 <li>All
1236 </ul>
1237 <td>
1238 <table>
1239 <tr><th>
1240 <tr><td>QASYMMU8
1241 <tr><td>QASYMMS8
1242 <tr><td>QSYMMS16
1243 <tr><td>SIGNED32
1244 <tr><td>FLOAT16
1245 <tr><td>FLOAT32
1246 </table>
1247<tr>
1248 <td rowspan="3">ElementwiseUnaryLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an Rsqrt, Exp, Neg, Log, Abs, Sin or Sqrt operation.
1250 <td rowspan="3">
1251 <ul>
1252 <li>ANEURALNETWORKS_ABS
1253 <li>ANEURALNETWORKS_EXP
1254 <li>ANEURALNETWORKS_LOG
1255 <li>ANEURALNETWORKS_NEG
1256 <li>ANEURALNETWORKS_RSQRT
1257 <li>ANEURALNETWORKS_SIN
1258 <li>ANEURALNETWORKS_SQRT
1259 </ul>
1260 <td>CpuRef
1261 <td>
1262 <ul>
1263 <li>All
1264 </ul>
1265 <td>
1266 <table>
1267 <tr><th>
1268 <tr><td>BFLOAT16
1269 <tr><td>FLOAT16
1270 <tr><td>FLOAT32
1271 <tr><td>QASYMMS8
1272 <tr><td>QASYMMU8
1273 <tr><td>QSYMMS16
1274 </table>
1275<tr>
1276 <td>CpuAcc
1277 <td>
1278 <ul>
1279 <li>All
1280 </ul>
1281 <td>
1282 <table>
1283 <tr><th>
1284 <tr><td>FLOAT16
1285 <tr><td>FLOAT32
1286 <tr><td>SIGNED32
1287 </table>
1288<tr>
1289 <td>GpuAcc
1290 <td>
1291 <ul>
1292 <li>All
1293 </ul>
1294 <td>
1295 <table>
1296 <tr><th>
1297 <tr><td>FLOAT16
1298 <tr><td>FLOAT32
1299 </table>
1300<tr>
1301 <td rowspan="1">FakeQuantizationLayer
1302 <td rowspan="1" style="width:200px;"> Layer to quantize float values and dequantize afterwards. The current implementation does not dequantize the values.
1303 <td rowspan="1">
1304 <ul>
1305 <li>N/A
1306 </ul>
1307 <td>CpuRef
1308 <td>
1309 <ul>
1310 <li>All
1311 </ul>
1312 <td>
1313 <table>
1314 <tr><th>
1315 <tr><td>FLOAT32
1316 </table>
1317<tr>
1318 <td rowspan="3">FillLayer
1319 <td rowspan="3" style="width:200px;"> Layer to set the values of a tensor with a given value.
1320 <td rowspan="3">
1321 <ul>
1322 <li>ANEURALNETWORKS_FILL
1323 </ul>
1324 <td>CpuRef
1325 <td>
1326 <ul>
1327 <li>All
1328 </ul>
1329 <td>
1330 <table>
1331 <tr><th>
1332 <tr><td>FLOAT16
1333 <tr><td>FLOAT32
1334 <tr><td>SIGNED32
1335 </table>
1336<tr>
1337 <td>CpuAcc
1338 <td>
1339 <ul>
1340 <li>All
1341 </ul>
1342 <td>
1343 <table>
1344 <tr><th>
1345 <tr><td>All
1346 </table>
1347<tr>
1348 <td>GpuAcc
1349 <td>
1350 <ul>
1351 <li>All
1352 </ul>
1353 <td>
1354 <table>
1355 <tr><th>
1356 <tr><td>All
1357 </table>
1358<tr>
1359 <td rowspan="3">FloorLayer
    <td rowspan="3" style="width:200px;"> Layer to round each value down to the nearest whole number (floor).
1361 <td rowspan="3">
1362 <ul>
1363 <li>ANEURALNETWORKS_FLOOR
1364 </ul>
1365 <td>CpuRef
1366 <td>
1367 <ul>
1368 <li>All
1369 </ul>
1370 <td>
1371 <table>
1372 <tr><th>
1373 <tr><td>BFLOAT16
1374 <tr><td>FLOAT16
1375 <tr><td>FLOAT32
1376 </table>
1377<tr>
1378 <td>CpuAcc
1379 <td>
1380 <ul>
1381 <li>All
1382 </ul>
1383 <td>
1384 <table>
1385 <tr><th>
1386 <tr><td>FLOAT32
1387 <tr><td>FLOAT16
1388 </table>
1389<tr>
1390 <td>GpuAcc
1391 <td>
1392 <ul>
1393 <li>All
1394 </ul>
1395 <td>
1396 <table>
1397 <tr><th>
1398 <tr><td>FLOAT32
1399 <tr><td>FLOAT16
1400 </table>
1401<tr>
1402 <td rowspan="3">FullyConnectedLayer
1403 <td rowspan="3" style="width:200px;"> Layer to perform a fully connected / dense operation.
1404 <td rowspan="3">
1405 <ul>
1406 <li>ANEURALNETWORKS_FULLY_CONNECTED
1407 </ul>
1408 <td>CpuRef
1409 <td>
1410 <ul>
1411 <li>All
1412 </ul>
1413 <td>
1414 <table>
1415 <tr><th>
1416 <tr><td>BFLOAT16
1417 <tr><td>FLOAT16
1418 <tr><td>FLOAT32
1419 <tr><td>QASYMMS8
1420 <tr><td>QASYMMU8
1421 <tr><td>QSYMMS16
1422 </table>
1423<tr>
1424 <td>CpuAcc
1425 <td>
1426 <ul>
1427 <li>NHWC
1428 <li>NCHW
1429 </ul>
1430 <td>
1431 <table>
1432 <tr><th>
1433 <tr><td>SIGNED32
1434 <tr><td>FLOAT16
1435 <tr><td>FLOAT32
1436 <tr><td>QASYMMU8
1437 <tr><td>QASYMMS8
1438 </table>
1439<tr>
1440 <td>GpuAcc
1441 <td>
1442 <ul>
1443 <li>NHWC
1444 <li>NCHW
1445 </ul>
1446 <td>
1447 <table>
1448 <tr><th>
1449 <tr><td>SIGNED32
1450 <tr><td>FLOAT16
1451 <tr><td>FLOAT32
1452 <tr><td>QASYMMU8
1453 <tr><td>QASYMMS8
1454 </table>
1455<tr>
1456 <td rowspan="3">GatherLayer
1457 <td rowspan="3" style="width:200px;"> Layer to perform the gather operation along the chosen axis.
1458 <td rowspan="3">
1459 <ul>
1460 <li>ANEURALNETWORKS_GATHER
1461 </ul>
1462 <td>CpuRef
1463 <td>
1464 <ul>
1465 <li>All
1466 </ul>
1467 <td>
1468 <table>
1469 <tr><th>
1470 <tr><td>BFLOAT16
1471 <tr><td>FLOAT16
1472 <tr><td>FLOAT32
1473 <tr><td>QASYMMS8
1474 <tr><td>QASYMMU8
1475 <tr><td>QSYMMS16
1476 <tr><td>SIGNED32
1477 </table>
1478<tr>
1479 <td>CpuAcc
1480 <td>
1481 <ul>
1482 <li>All
1483 </ul>
1484 <td>
1485 <table>
1486 <tr><th>
1487 <tr><td>All
1488 </table>
1489<tr>
1490 <td>GpuAcc
1491 <td>
1492 <ul>
1493 <li>All
1494 </ul>
1495 <td>
1496 <table>
1497 <tr><th>
1498 <tr><td>All
1499 </table>
1500<tr>
    <td rowspan="3">GatherNdLayer
1502 <td rowspan="3" style="width:200px;"> Layer to perform the gatherNd operation.
1503 <td rowspan="3">
1504 <ul>
1505 <li>N/A
1506 </ul>
1507 <td>CpuRef
1508 <td>
1509 <ul>
1510 <li>All
1511 </ul>
1512 <td>
1513 <table>
1514 <tr><th>
1515 <tr><td>BFLOAT16
1516 <tr><td>FLOAT16
1517 <tr><td>FLOAT32
1518 <tr><td>QASYMMS8
1519 <tr><td>QASYMMU8
1520 <tr><td>QSYMMS16
1521 <tr><td>SIGNED32
1522 </table>
1523<tr>
1524 <td>CpuAcc
1525 <td>
1526 <ul>
     <li>All
    </ul>
    <td>
    <table>
1531 <tr><th>
1532 <tr><td>BFLOAT16
1533 <tr><td>FLOAT16
1534 <tr><td>FLOAT32
1535 <tr><td>QASYMMS8
1536 <tr><td>QASYMMU8
1537 <tr><td>QSYMMS16
1538 <tr><td>SIGNED32
1539 </table>
<tr>
1541 <td>GpuAcc
1542 <td>
1543 <ul>
     <li>All
    </ul>
    <td>
    <table>
1548 <tr><th>
1549 <tr><td>BFLOAT16
1550 <tr><td>FLOAT16
1551 <tr><td>FLOAT32
1552 <tr><td>QASYMMS8
1553 <tr><td>QASYMMU8
1554 <tr><td>QSYMMS16
1555 <tr><td>SIGNED32
1556 </table>
<tr>
    <td rowspan="1">InputLayer
1559 <td rowspan="1" style="width:200px;"> Special layer used to provide input data to the computational network.
1560 <td rowspan="1">
1561 <ul>
1562 <li>N/A
1563 </ul>
1564 <td>All
1565 <td>
1566 <ul>
1567 <li>All
1568 </ul>
1569 <td>
1570 <table>
1571 <tr><th>
1572 <tr><td>All
1573 </table>
1574<tr>
1575 <td rowspan="3">InstanceNormalizationLayer
1576 <td rowspan="3" style="width:200px;"> Layer to perform an instance normalization on a given axis.
1577 <td rowspan="3">
1578 <ul>
1579 <li>ANEURALNETWORKS_INSTANCE_NORMALIZATION
1580 </ul>
1581 <td>CpuRef
1582 <td>
1583 <ul>
1584 <li>All
1585 </ul>
1586 <td>
1587 <table>
1588 <tr><th>
1589 <tr><td>BFLOAT16
1590 <tr><td>FLOAT16
1591 <tr><td>FLOAT32
1592 </table>
1593<tr>
1594 <td>CpuAcc
1595 <td>
1596 <ul>
1597 <li>NHWC
1598 <li>NCHW
1599 </ul>
1600 <td>
1601 <table>
1602 <tr><th>
1603 <tr><td>FLOAT16
1604 <tr><td>FLOAT32
1605 </table>
1606<tr>
1607 <td>GpuAcc
1608 <td>
1609 <ul>
1610 <li>NHWC
1611 <li>NCHW
1612 </ul>
1613 <td>
1614 <table>
1615 <tr><th>
1616 <tr><td>FLOAT16
1617 <tr><td>FLOAT32
1618 </table>
1619<tr>
1620 <td rowspan="3">L2NormalizationLayer
1621 <td rowspan="3" style="width:200px;"> Layer to perform an L2 normalization on a given axis.
1622 <td rowspan="3">
1623 <ul>
1624 <li>ANEURALNETWORKS_L2_NORMALIZATION
1625 </ul>
1626 <td>CpuRef
1627 <td>
1628 <ul>
1629 <li>All
1630 </ul>
1631 <td>
1632 <table>
1633 <tr><th>
1634 <tr><td>BFLOAT16
1635 <tr><td>FLOAT16
1636 <tr><td>FLOAT32
1637 <tr><td>QASYMMS8
1638 <tr><td>QASYMMU8
1639 <tr><td>QSYMMS16
1640 </table>
1641<tr>
1642 <td>CpuAcc
1643 <td>
1644 <ul>
1645 <li>NHWC
1646 <li>NCHW
1647 </ul>
1648 <td>
1649 <table>
1650 <tr><th>
1651 <tr><td>FLOAT16
1652 <tr><td>FLOAT32
1653 </table>
1654<tr>
1655 <td>GpuAcc
1656 <td>
1657 <ul>
1658 <li>NHWC
1659 <li>NCHW
1660 </ul>
1661 <td>
1662 <table>
1663 <tr><th>
1664 <tr><td>FLOAT16
1665 <tr><td>FLOAT32
1666 </table>
1667<tr>
1668 <td rowspan="3">LogSoftmaxLayer
    <td rowspan="3" style="width:200px;"> Layer to perform the log-softmax activation given logits.
1670 <td rowspan="3">
1671 <ul>
1672 <li>N/A
1673 </ul>
1674 <td>CpuRef
1675 <td>
1676 <ul>
1677 <li>All
1678 </ul>
1679 <td>
1680 <table>
1681 <tr><th>
1682 <tr><td>BFLOAT16
1683 <tr><td>FLOAT16
1684 <tr><td>FLOAT32
1685 </table>
1686<tr>
1687 <td>CpuAcc
1688 <td>
1689 <ul>
1690 <li>All
1691 </ul>
1692 <td>
1693 <table>
1694 <tr><th>
1695 <tr><td>QASYMMU8
1696 <tr><td>QASYMMS8
1697 <tr><td>FLOAT16
1698 <tr><td>FLOAT32
1699 </table>
1700<tr>
1701 <td>GpuAcc
1702 <td>
1703 <ul>
1704 <li>All
1705 </ul>
1706 <td>
1707 <table>
1708 <tr><th>
1709 <tr><td>QASYMMU8
1710 <tr><td>QASYMMS8
1711 <tr><td>FLOAT16
1712 <tr><td>FLOAT32
1713 </table>
1714<tr>
1715 <td rowspan="3">LogicalBinaryLayer
1716 <td rowspan="3" style="width:200px;"> Layer to perform Logical AND - Logical NOT - Logical OR operations.
1717 <td rowspan="3">
1718 <ul>
1719 <li>ANEURALNETWORKS_LOGICAL_AND
1720 <li>ANEURALNETWORKS_LOGICAL_NOT
1721 <li>ANEURALNETWORKS_LOGICAL_OR
1722 </ul>
1723 <td>CpuRef
1724 <td>
1725 <ul>
1726 <li>All
1727 </ul>
1728 <td>
1729 <table>
1730 <tr><th>
1731 <tr><td>BOOLEAN
1732 </table>
1733<tr>
1734 <td>CpuAcc
1735 <td>
1736 <ul>
1737 <li>All
1738 </ul>
1739 <td>
1740 <table>
1741 <tr><th>
1742 <tr><td>BOOLEAN
1743 </table>
1744<tr>
1745 <td>GpuAcc
1746 <td>
1747 <ul>
1748 <li>All
1749 </ul>
1750 <td>
1751 <table>
1752 <tr><th>
1753 <tr><td>BOOLEAN
1754 </table>
1755<tr>
1756 <td rowspan="3">LstmLayer
1757 <td rowspan="3" style="width:200px;"> Layer to perform a single time step in a Long Short-Term Memory (LSTM) operation.
1758 <td rowspan="3">
1759 <ul>
1760 <li>ANEURALNETWORKS_LSTM
1761 </ul>
1762 <td>CpuRef
1763 <td>
1764 <ul>
1765 <li>All
1766 </ul>
1767 <td>
1768 <table>
1769 <tr><th>
1770 <tr><td>BFLOAT16
1771 <tr><td>FLOAT16
1772 <tr><td>QSYMMS16
1773 </table>
1774<tr>
1775 <td>CpuAcc
1776 <td>
1777 <ul>
1778 <li>All
1779 </ul>
1780 <td>
1781 <table>
1782 <tr><th>
1783 <tr><td>FLOAT16
1784 <tr><td>FLOAT32
1785 </table>
1786<tr>
1787 <td>GpuAcc
1788 <td>
1789 <ul>
1790 <li>All
1791 </ul>
1792 <td>
1793 <table>
1794 <tr><th>
1795 <tr><td>FLOAT16
1796 <tr><td>FLOAT32
1797 </table>
1798<tr>
1799 <td rowspan="3">MapLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a map operation on a tensor.
1801 <td rowspan="3">
1802 <ul>
1803 <li>N/A
1804 </ul>
1805 <td>CpuRef
1806 <td>
1807 <ul>
1808 <li>All
1809 </ul>
1810 <td>
1811 <table>
1812 <tr><th>
1813 <tr><td>All
1814 </table>
1815<tr>
1816 <td>CpuAcc
1817 <td>
1818 <ul>
1819 <li>All
1820 </ul>
1821 <td>
1822 <table>
1823 <tr><th>
1824 <tr><td>All
1825 </table>
1826<tr>
1827 <td>GpuAcc
1828 <td>
1829 <ul>
1830 <li>All
1831 </ul>
1832 <td>
1833 <table>
1834 <tr><th>
1835 <tr><td>All
1836 </table>
1837<tr>
1838 <td rowspan="3">MaximumLayer
1839 <td rowspan="3" style="width:200px;"> Layer to perform an elementwise maximum of two tensors.
1840 <td rowspan="3">
1841 <ul>
1842 <li>N/A
1843 </ul>
1844 <td>CpuRef
1845 <td>
1846 <ul>
1847 <li>All
1848 </ul>
1849 <td>
1850 <table>
1851 <tr><th>
1852 <tr><td>BFLOAT16
1853 <tr><td>FLOAT16
1854 <tr><td>FLOAT32
1855 <tr><td>QASYMMS8
1856 <tr><td>QASYMMU8
1857 <tr><td>QSYMMS16
1858 <tr><td>SIGNED32
1859 </table>
1860<tr>
1861 <td>CpuAcc
1862 <td>
1863 <ul>
1864 <li>All
1865 </ul>
1866 <td>
1867 <table>
1868 <tr><th>
1869 <tr><td>QASYMMU8
1870 <tr><td>QASYMMS8
1871 <tr><td>FLOAT16
1872 <tr><td>FLOAT32
1873 <tr><td>SIGNED32
1874 </table>
1875<tr>
1876 <td>GpuAcc
1877 <td>
1878 <ul>
1879 <li>All
1880 </ul>
1881 <td>
1882 <table>
1883 <tr><th>
1884 <tr><td>QASYMMU8
1885 <tr><td>QASYMMS8
1886 <tr><td>QSYMMS16
1887 <tr><td>FLOAT16
1888 <tr><td>FLOAT32
1889 <tr><td>SIGNED32
1890 </table>
1891<tr>
1892 <td rowspan="3">MeanLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a reduce mean operation.
1894 <td rowspan="3">
1895 <ul>
1896 <li>ANEURALNETWORKS_MEAN
1897 </ul>
1898 <td>CpuRef
1899 <td>
1900 <ul>
1901 <li>All
1902 </ul>
1903 <td>
1904 <table>
1905 <tr><th>
1906 <tr><td>BFLOAT16
1907 <tr><td>FLOAT16
1908 <tr><td>FLOAT32
1909 <tr><td>QASYMMS8
1910 <tr><td>QASYMMU8
1911 <tr><td>QSYMMS16
1912 </table>
1913<tr>
1914 <td>CpuAcc
1915 <td>
1916 <ul>
1917 <li>All
1918 </ul>
1919 <td>
1920 <table>
1921 <tr><th>
1922 <tr><td>QASYMMU8
1923 <tr><td>QASYMMS8
1924 <tr><td>FLOAT16
1925 <tr><td>FLOAT32
1926 </table>
1927<tr>
1928 <td>GpuAcc
1929 <td>
1930 <ul>
1931 <li>All
1932 </ul>
1933 <td>
1934 <table>
1935 <tr><th>
1936 <tr><td>QASYMMU8
1937 <tr><td>QASYMMS8
1938 <tr><td>FLOAT16
1939 <tr><td>FLOAT32
1940 </table>
1941<tr>
1942 <td rowspan="3">MemCopyLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a memory copy operation.
1944 <td rowspan="3">
1945 <ul>
1946 <li>N/A
1947 </ul>
1948 <td>CpuRef
1949 <td>
1950 <ul>
1951 <li>All
1952 </ul>
1953 <td>
1954 <table>
1955 <tr><th>
1956 <tr><td>BFLOAT16
1957 <tr><td>FLOAT16
1958 <tr><td>FLOAT32
1959 <tr><td>QASYMMS8
1960 <tr><td>QASYMMU8
1961 <tr><td>QSYMMS16
1962 <tr><td>BOOLEAN
1963 </table>
1964<tr>
1965 <td>CpuAcc
1966 <td>
1967 <ul>
1968 <li>All
1969 </ul>
1970 <td>
1971 <table>
1972 <tr><th>
1973 <tr><td>All
1974 </table>
1975<tr>
1976 <td>GpuAcc
1977 <td>
1978 <ul>
1979 <li>All
1980 </ul>
1981 <td>
1982 <table>
1983 <tr><th>
1984 <tr><td>All
1985 </table>
1986<tr>
1987 <td rowspan="3">MemImportLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a memory import operation.
1989 <td rowspan="3">
1990 <ul>
1991 <li>N/A
1992 </ul>
1993 <td>CpuRef
1994 <td>
1995 <ul>
1996 <li>All
1997 </ul>
1998 <td>
1999 <table>
2000 <tr><th>
2001 <tr><td>All
2002 </table>
2003<tr>
2004 <td>CpuAcc
2005 <td>
2006 <ul>
2007 <li>All
2008 </ul>
2009 <td>
2010 <table>
2011 <tr><th>
2012 <tr><td>All
2013 </table>
2014<tr>
2015 <td>GpuAcc
2016 <td>
2017 <ul>
2018 <li>All
2019 </ul>
2020 <td>
2021 <table>
2022 <tr><th>
2023 <tr><td>All
2024 </table>
2025<tr>
2026 <td rowspan="3">MergeLayer
2027 <td rowspan="3" style="width:200px;"> Layer to concatenate tensors along a given axis.
2028 <td rowspan="3">
2029 <ul>
2030 <li>ANEURALNETWORKS_CONCATENATION
2031 </ul>
2032 <td>CpuRef
2033 <td>
2034 <ul>
2035 <li>All
2036 </ul>
2037 <td>
2038 <table>
2039 <tr><th>
2040 <tr><td>BFLOAT16
2041 <tr><td>FLOAT16
2042 <tr><td>FLOAT32
2043 <tr><td>QASYMMS8
2044 <tr><td>QASYMMU8
2045 <tr><td>QSYMMS16
2046 </table>
2047<tr>
2048 <td>CpuAcc
2049 <td>
2050 <ul>
2051 <li>All
2052 </ul>
2053 <td>
2054 <table>
2055 <tr><th>
2056 <tr><td>QASYMMU8
2057 <tr><td>QASYMMS8
2058 <tr><td>FLOAT16
2059 <tr><td>FLOAT32
2060 </table>
2061<tr>
2062 <td>GpuAcc
2063 <td>
2064 <ul>
2065 <li>All
2066 </ul>
2067 <td>
2068 <table>
2069 <tr><th>
2070 <tr><td>QASYMMU8
2071 <tr><td>QASYMMS8
2072 <tr><td>FLOAT16
2073 <tr><td>FLOAT32
2074 </table>
2075<tr>
2076 <td rowspan="3">MinimumLayer
2077 <td rowspan="3" style="width:200px;"> Layer to perform an elementwise minimum of two tensors.
2078 <td rowspan="3">
2079 <ul>
2080 <li>ANEURALNETWORKS_MINIMUM
2081 </ul>
2082 <td>CpuRef
2083 <td>
2084 <ul>
2085 <li>All
2086 </ul>
2087 <td>
2088 <table>
2089 <tr><th>
2090 <tr><td>BFLOAT16
2091 <tr><td>FLOAT16
2092 <tr><td>FLOAT32
2093 <tr><td>QASYMMS8
2094 <tr><td>QASYMMU8
2095 <tr><td>QSYMMS16
2096 <tr><td>SIGNED32
2097 </table>
2098<tr>
2099 <td>CpuAcc
2100 <td>
2101 <ul>
2102 <li>All
2103 </ul>
2104 <td>
2105 <table>
2106 <tr><th>
2107 <tr><td>QASYMMU8
2108 <tr><td>QASYMMS8
2109 <tr><td>QSYMMS16
2110 <tr><td>FLOAT16
2111 <tr><td>FLOAT32
2112 </table>
2113<tr>
2114 <td>GpuAcc
2115 <td>
2116 <ul>
2117 <li>All
2118 </ul>
2119 <td>
2120 <table>
2121 <tr><th>
2122 <tr><td>QASYMMU8
2123 <tr><td>QASYMMS8
2124 <tr><td>QSYMMS16
2125 <tr><td>FLOAT16
2126 <tr><td>FLOAT32
2127 <tr><td>SIGNED32
2128 </table>
2129<tr>
2130 <td rowspan="3">MultiplicationLayer
2131 <td rowspan="3" style="width:200px;"> Layer to perform an elementwise multiplication of two tensors.
2132 <td rowspan="3">
2133 <ul>
2134 <li>ANEURALNETWORKS_MUL
2135 </ul>
2136 <td>CpuRef
2137 <td>
2138 <ul>
2139 <li>All
2140 </ul>
2141 <td>
2142 <table>
2143 <tr><th>
2144 <tr><td>BFLOAT16
2145 <tr><td>FLOAT16
2146 <tr><td>FLOAT32
2147 <tr><td>QASYMMS8
2148 <tr><td>QASYMMU8
2149 <tr><td>QSYMMS16
2150 <tr><td>SIGNED32
2151 </table>
2152<tr>
2153 <td>CpuAcc
2154 <td>
2155 <ul>
2156 <li>All
2157 </ul>
2158 <td>
2159 <table>
2160 <tr><th>
2161 <tr><td>QASYMMU8
2162 <tr><td>QASYMMS8
2163 <tr><td>QSYMMS16
2164 <tr><td>SIGNED32
2165 <tr><td>FLOAT16
2166 <tr><td>FLOAT32
2167 </table>
2168<tr>
2169 <td>GpuAcc
2170 <td>
2171 <ul>
2172 <li>All
2173 </ul>
2174 <td>
2175 <table>
2176 <tr><th>
2177 <tr><td>QASYMMU8
2178 <tr><td>QASYMMS8
2179 <tr><td>QSYMMS16
2180 <tr><td>SIGNED32
2181 <tr><td>FLOAT16
2182 <tr><td>FLOAT32
2184 </table>
2185<tr>
2186 <td rowspan="3">NormalizationLayer
    <td rowspan="3" style="width:200px;"> Layer to compute a normalization operation.
2188 <td rowspan="3">
2189 <ul>
2190 <li>ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION
2191 </ul>
2192 <td>CpuRef
2193 <td>
2194 <ul>
2195 <li>All
2196 </ul>
2197 <td>
2198 <table>
2199 <tr><th>
2200 <tr><td>BFLOAT16
2201 <tr><td>FLOAT16
2202 <tr><td>FLOAT32
2203 <tr><td>QASYMMS8
2204 <tr><td>QASYMMU8
2205 <tr><td>QSYMMS16
2206 </table>
2207<tr>
2208 <td>CpuAcc
2209 <td>
2210 <ul>
2211 <li>NHWC
2212 <li>NCHW
2213 </ul>
2214 <td>
2215 <table>
2216 <tr><th>
2217 <tr><td>FLOAT32
2218 <tr><td>FLOAT16
2219 </table>
2220<tr>
2221 <td>GpuAcc
2222 <td>
2223 <ul>
2224 <li>NHWC
2225 <li>NCHW
2226 </ul>
2227 <td>
2228 <table>
2229 <tr><th>
2230 <tr><td>FLOAT32
2231 <tr><td>FLOAT16
2232 </table>
2233<tr>
2234 <td rowspan="1">OutputLayer
2235 <td rowspan="1" style="width:200px;"> A special layer providing access to a user supplied buffer into which the output of a network can be written.
2236 <td rowspan="1">
2237 <ul>
2238 <li>N/A
2239 </ul>
2240 <td>All
2241 <td>
2242 <ul>
2243 <li>All
2244 </ul>
2245 <td>
2246 <table>
2247 <tr><th>
2248 <tr><td>All
2249 </table>
2250<tr>
2251 <td rowspan="3">PadLayer
2252 <td rowspan="3" style="width:200px;"> Layer to pad a tensor.
2253 <td rowspan="3">
2254 <ul>
2255 <li>ANEURALNETWORKS_PAD
2256 <li>ANEURALNETWORKS_PAD_V2
2257 </ul>
2258 <td>CpuRef
2259 <td>
2260 <ul>
2261 <li>All
2262 </ul>
2263 <td>
2264 <table>
2265 <tr><th>
2266 <tr><td>BFLOAT16
2267 <tr><td>FLOAT16
2268 <tr><td>FLOAT32
2269 <tr><td>QASYMMS8
2270 <tr><td>QASYMMU8
2271 <tr><td>QSYMMS16
2272 </table>
2273<tr>
2274 <td>CpuAcc
2275 <td>
2276 <ul>
2277 <li>NHWC
2278 <li>NCHW
2279 </ul>
2280 <td>
2281 <table>
2282 <tr><th>
2283 <tr><td>All
2284 </table>
2285<tr>
2286 <td>GpuAcc
2287 <td>
2288 <ul>
2289 <li>NHWC
2290 <li>NCHW
2291 </ul>
2292 <td>
2293 <table>
2294 <tr><th>
2295 <tr><td>All
2296 </table>
2297<tr>
2298 <td rowspan="3">PermuteLayer
2299 <td rowspan="3" style="width:200px;"> Layer to transpose an ND tensor.
2300 <td rowspan="3">
2301 <ul>
2302 <li>ANEURALNETWORKS_TRANSPOSE
2303 </ul>
2304 <td>CpuRef
2305 <td>
2306 <ul>
2307 <li>All
2308 </ul>
2309 <td>
2310 <table>
2311 <tr><th>
2312 <tr><td>BFLOAT16
2313 <tr><td>FLOAT16
2314 <tr><td>FLOAT32
2315 <tr><td>QASYMMS8
2316 <tr><td>QASYMMU8
2317 <tr><td>QSYMMS16
2318 </table>
2319<tr>
2320 <td>CpuAcc
2321 <td>
2322 <ul>
2323 <li>NHWC
2324 <li>NCHW
2325 </ul>
2326 <td>
2327 <table>
2328 <tr><th>
2329 <tr><td>All
2330 </table>
2331<tr>
2332 <td>GpuAcc
2333 <td>
2334 <ul>
2335 <li>NHWC
2336 <li>NCHW
2337 </ul>
2338 <td>
2339 <table>
2340 <tr><th>
2341 <tr><td>All
2342 </table>
2343<tr>
2344 <td rowspan="3">Pooling2dLayer
    <td rowspan="3" style="width:200px;"> Layer to perform 2D pooling with the specified pooling operation.
    <td rowspan="3">
2347 <ul>
2348 <li>ANEURALNETWORKS_AVERAGE_POOL_2D
2349 <li>ANEURALNETWORKS_L2_POOL_2D
2350 <li>ANEURALNETWORKS_MAX_POOL_2D
2351 </ul>
2352 <td>CpuRef
2353 <td>
2354 <ul>
2355 <li>All
2356 </ul>
2357 <td>
2358 <table>
2359 <tr><th>
2360 <tr><td>BFLOAT16
2361 <tr><td>FLOAT16
2362 <tr><td>FLOAT32
2363 <tr><td>QASYMMS8
2364 <tr><td>QASYMMU8
2365 <tr><td>QSYMMS16
2366 </table>
2367<tr>
2368 <td>CpuAcc
2369 <td>
2370 <ul>
2371 <li>NHWC
2372 <li>NCHW
2373 </ul>
2374 <td>
2375 <table>
2376 <tr><th>
2377 <tr><td>QASYMMU8
2378 <tr><td>QASYMMS8
2379 <tr><td>FLOAT16
2380 <tr><td>FLOAT32
2381 </table>
2382<tr>
2383 <td>GpuAcc
2384 <td>
2385 <ul>
2386 <li>NHWC
2387 <li>NCHW
2388 </ul>
2389 <td>
2390 <table>
2391 <tr><th>
2392 <tr><td>QASYMMU8
2393 <tr><td>QASYMMS8
2394 <tr><td>FLOAT16
2395 <tr><td>FLOAT32
2396 </table>
2397<tr>
    <td rowspan="3">Pooling3dLayer
2399 <td rowspan="3" style="width:200px;"> Layer to perform 3D pooling with the specified pooling operation.
2400 <td rowspan="3">
2401 <ul>
2402 <li>ANEURALNETWORKS_AVERAGE_POOL_3D
2403 <li>ANEURALNETWORKS_L2_POOL_3D
2404 <li>ANEURALNETWORKS_MAX_POOL_3D
2405 </ul>
2406 <td>CpuRef
2407 <td>
2408 <ul>
2409 <li>NDHWC
2410 </ul>
2411 <td>
2412 <table>
2413 <tr><th>
2414 <tr><td>BFLOAT16
2415 <tr><td>FLOAT16
2416 <tr><td>FLOAT32
2417 <tr><td>QASYMMS8
2418 <tr><td>QASYMMU8
2419 <tr><td>QSYMMS16
2420 </table>
2421<tr>
    <td>CpuAcc
    <td>
    <ul>
     <li>N/A
    </ul>
    <td>
<tr>
    <td>GpuAcc
    <td>
    <ul>
     <li>NDHWC
    </ul>
    <td>
<tr>
    <td rowspan="1">PreCompiledLayer
2436 <td rowspan="1" style="width:200px;"> Opaque layer provided by a backend which provides an executable representation of a subgraph from the original network.
2437 <td rowspan="1">
2438 <ul>
2439 <li>N/A
2440 </ul>
2441 <td>N/A
2442 <td>N/A
2443 <td>N/A
2444<tr>
2445 <td rowspan="3">PreluLayer
2446 <td rowspan="3" style="width:200px;"> Layer to compute the activation layer with the PRELU activation function.
2447 <td rowspan="3">
2448 <ul>
2449 <li>ANEURALNETWORKS_PRELU
2450 </ul>
2451 <td>CpuRef
2452 <td>
2453 <ul>
2454 <li>All
2455 </ul>
2456 <td>
2457 <table>
2458 <tr><th>
2459 <tr><td>BFLOAT16
2460 <tr><td>FLOAT16
2461 <tr><td>FLOAT32
2462 <tr><td>QASYMMS8
2463 <tr><td>QASYMMU8
2464 <tr><td>QSYMMS16
2465 </table>
2466<tr>
2467 <td>CpuAcc
2468 <td>
2469 <ul>
2470 <li>All
2471 </ul>
2472 <td>
2473 <table>
2474 <tr><th>
2475 <tr><td>QASYMMU8
2476 <tr><td>QASYMMS8
2477 <tr><td>FLOAT16
2478 <tr><td>FLOAT32
2479 </table>
2480<tr>
2481 <td>GpuAcc
2482 <td>
2483 <ul>
2484 <li>All
2485 </ul>
2486 <td>
2487 <table>
2488 <tr><th>
2489 <tr><td>QASYMMU8
2490 <tr><td>QASYMMS8
2491 <tr><td>FLOAT16
2492 <tr><td>FLOAT32
2493 </table>
2494<tr>
2495 <td rowspan="3">QLstmLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a quantized LSTM (Long Short-Term Memory) operation.
2497 <td rowspan="3">
2498 <ul>
2499 <li>ANEURALNETWORKS_QUANTIZED_LSTM
2500 <li>ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
2501 </ul>
2502 <td>CpuRef
2503 <td>
2504 <ul>
2505 <li>All
2506 </ul>
2507 <td>
2508 <table>
2509 <tr><th>
2510 <tr><td>All
2511 </table>
2512<tr>
2513 <td>CpuAcc
2514 <td>
2515 <ul>
2516 <li>All
2517 </ul>
2518 <td>
2519 <table>
2520 <tr><th>
2521 <tr><td>QASYMMS8
2522 <tr><td>QASYMMU8
2523 <tr><td>SIGNED32
2524 <tr><td>QSYMMS16
2525 </table>
2526<tr>
2527 <td>GpuAcc
2528 <td>
2529 <ul>
2530 <li>All
2531 </ul>
2532 <td>
2533 <table>
2534 <tr><th>
2535 <tr><td>QASYMMS8
2536 <tr><td>QASYMMU8
2537 <tr><td>SIGNED32
2538 <tr><td>QSYMMS16
2539 </table>
2540<tr>
2541 <td rowspan="3">QuantizeLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a quantization operation.
2543 <td rowspan="3">
2544 <ul>
2545 <li>ANEURALNETWORKS_QUANTIZE
2546 </ul>
2547 <td>CpuRef
2548 <td>
2549 <ul>
2550 <li>All
2551 </ul>
2552 <td>
2553 <table>
2554 <tr><th>
2555 <tr><td>BFLOAT16
2556 <tr><td>FLOAT16
2557 <tr><td>FLOAT32
2558 <tr><td>QASYMMS8
2559 <tr><td>QASYMMU8
2560 <tr><td>QSYMMS8
2561 <tr><td>QSYMMS16
2562 </table>
2563<tr>
2564 <td>CpuAcc
2565 <td>
2566 <ul>
2567 <li>All
2568 </ul>
2569 <td>
2570 <table>
2571 <tr><th>
2572 <tr><td>QASYMMU8
2573 <tr><td>QASYMMS8
2574 <tr><td>QASYMM16
2575 <tr><td>FLOAT16
2576 <tr><td>FLOAT32
2577 </table>
2578<tr>
2579 <td>GpuAcc
2580 <td>
2581 <ul>
2582 <li>All
2583 </ul>
2584 <td>
2585 <table>
2586 <tr><th>
2587 <tr><td>QASYMMU8
2588 <tr><td>QASYMMS8
2589 <tr><td>QASYMM16
2590 <tr><td>FLOAT16
2591 <tr><td>FLOAT32
2592 </table>
2593<tr>
2594 <td rowspan="3">QuantizedLstmLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a quantized LSTM (Long Short-Term Memory) operation.
2596 <td rowspan="3">
2597 <ul>
2598 <li>ANEURALNETWORKS_QUANTIZED_LSTM
2599 <li>ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
2600 </ul>
2601 <td>CpuRef
2602 <td>
2603 <ul>
2604 <li>All
2605 </ul>
2606 <td>
2607 <table>
2608 <tr><th>
2609 <tr><td>All
2610 </table>
2611<tr>
2612 <td>CpuAcc
2613 <td>
2614 <ul>
2615 <li>All
2616 </ul>
2617 <td>
2618 <table>
2619 <tr><th>
2620 <tr><td>SIGNED32
2621 <tr><td>QASYMMU8
2622 <tr><td>QSYMMS16
2623 </table>
2624<tr>
2625 <td>GpuAcc
2626 <td>
2627 <ul>
2628 <li>All
2629 </ul>
2630 <td>
2631 <table>
2632 <tr><th>
2633 <tr><td>SIGNED32
2634 <tr><td>QASYMMU8
2635 <tr><td>QSYMMS16
2636 </table>
2637<tr>
2638 <td rowspan="3">RankLayer
2639 <td rowspan="3" style="width:200px;"> Layer to perform a rank operation.
2640 <td rowspan="3">
2641 <ul>
2642 <li>ANEURALNETWORKS_RANK
2643 </ul>
2644 <td>CpuRef
2645 <td>
2646 <ul>
2647 <li>All
2648 </ul>
2649 <td>
2650 <table>
2651 <tr><th>
2652 <tr><td>All
2653 </table>
2654<tr>
2655 <td>CpuAcc
2656 <td>
2657 <ul>
2658 <li>All
2659 </ul>
2660 <td>
2661 <table>
2662 <tr><th>
2663 <tr><td>All
2664 </table>
2665<tr>
2666 <td>GpuAcc
2667 <td>
2668 <ul>
2669 <li>All
2670 </ul>
2671 <td>
2672 <table>
2673 <tr><th>
2674 <tr><td>All
2675 </table>
2676<tr>
2677 <td rowspan="3">ReduceLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a reduction with one of the following operations: ARG_IDX_MAX (index of the max value), ARG_IDX_MIN (index of the min value), MEAN_SUM (mean of sum), PROD (product), SUM_SQUARE (sum of squares), SUM, MIN or MAX.
2679 <td rowspan="3">
2680 <ul>
2681 <li>ANEURALNETWORKS_REDUCE_MAX
2682 <li>ANEURALNETWORKS_REDUCE_MIN
2683 <li>ANEURALNETWORKS_REDUCE_SUM
     <li>ANEURALNETWORKS_REDUCE_PROD
    </ul>
2686 <td>CpuRef
2687 <td>
2688 <ul>
2689 <li>All
2690 </ul>
2691 <td>
2692 <table>
2693 <tr><th>
2694 <tr><td>BFLOAT16
2695 <tr><td>FLOAT16
2696 <tr><td>FLOAT32
2697 <tr><td>QASYMMS8
2698 <tr><td>QASYMMU8
2699 <tr><td>QSYMMS16
2700 <tr><td>SIGNED32
2701 </table>
2702<tr>
2703 <td>CpuAcc
2704 <td>
2705 <ul>
2706 <li>All
2707 </ul>
2708 <td>
2709 <table>
2710 <tr><th>
2711 <tr><td>QASYMMU8
2712 <tr><td>QASYMMS8
2713 <tr><td>FLOAT16
2714 <tr><td>FLOAT32
2715 <tr><td>SIGNED32
2716 </table>
2717<tr>
2718 <td>GpuAcc
2719 <td>
2720 <ul>
2721 <li>All
2722 </ul>
2723 <td>
2724 <table>
2725 <tr><th>
2726 <tr><td>QASYMMU8
2727 <tr><td>QASYMMS8
2728 <tr><td>FLOAT16
2729 <tr><td>FLOAT32
2730 <tr><td>SIGNED32
2731 </table>
2732<tr>
2733 <td rowspan="3">ReshapeLayer
2734 <td rowspan="3" style="width:200px;"> Layer to reshape a tensor.
2735 <td rowspan="3">
2736 <ul>
2737 <li>ANEURALNETWORKS_RESHAPE
2738 <li>ANEURALNETWORKS_SQUEEZE
2739 <li>ANEURALNETWORKS_EXPAND_DIMS
2740 </ul>
2741 <td>CpuRef
2742 <td>
2743 <ul>
2744 <li>All
2745 </ul>
2746 <td>
2747 <table>
2748 <tr><th>
2749 <tr><td>BFLOAT16
2750 <tr><td>FLOAT16
2751 <tr><td>FLOAT32
2752 <tr><td>QASYMMS8
2753 <tr><td>QASYMMU8
2754 <tr><td>QSYMMS16
2755 <tr><td>SIGNED32
2756 <tr><td>BOOLEAN
2757 </table>
2758<tr>
2759 <td>CpuAcc
2760 <td>
2761 <ul>
2762 <li>All
2763 </ul>
2764 <td>
2765 <table>
2766 <tr><th>
2767 <tr><td>All
2768 </table>
2769<tr>
2770 <td>GpuAcc
2771 <td>
2772 <ul>
2773 <li>All
2774 </ul>
2775 <td>
2776 <table>
2777 <tr><th>
2778 <tr><td>All
2779 </table>
2780<tr>
2781 <td rowspan="3">ResizeLayer
    <td rowspan="3" style="width:200px;"> Layer to resize a tensor using one of the interpolation methods: Bilinear or Nearest Neighbor.
2783 <td rowspan="3">
2784 <ul>
2785 <li>ANEURALNETWORKS_RESIZE_BILINEAR
2786 <li>ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR
2787 </ul>
2788 <td>CpuRef
2789 <td>
2790 <ul>
2791 <li>All
2792 </ul>
2793 <td>
2794 <table>
2795 <tr><th>
2796 <tr><td>BFLOAT16
2797 <tr><td>FLOAT16
2798 <tr><td>FLOAT32
2799 <tr><td>QASYMMS8
2800 <tr><td>QASYMMU8
2801 <tr><td>QSYMMS16
2802 </table>
2803<tr>
2804 <td>CpuAcc
2805 <td>
2806 <ul>
2807 <li>NHWC
2808 <li>NCHW
2809 </ul>
2810 <td>
2811 <table>
2812 <tr><th>
2813 <tr><td>QASYMMU8
2814 <tr><td>QASYMMS8
2815 <tr><td>FLOAT16
2816 <tr><td>FLOAT32
2817 </table>
2818<tr>
2819 <td>GpuAcc
2820 <td>
2821 <ul>
2822 <li>NHWC
2823 <li>NCHW
2824 </ul>
2825 <td>
2826 <table>
2827 <tr><th>
2828 <tr><td>QASYMMU8
2829 <tr><td>QASYMMS8
2830 <tr><td>FLOAT16
2831 <tr><td>FLOAT32
2832 </table>
2833<tr>
2834 <td rowspan="3">RsqrtLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an Rsqrt operation.
2836 <td rowspan="3">
2837 <ul>
2838 <li>ANEURALNETWORKS_RSQRT
2839 </ul>
2840 <td>CpuRef
2841 <td>
2842 <ul>
2843 <li>All
2844 </ul>
2845 <td>
2846 <table>
2847 <tr><th>
2848 <tr><td>BFLOAT16
2849 <tr><td>FLOAT16
2850 <tr><td>FLOAT32
2851 <tr><td>QASYMMS8
2852 <tr><td>QASYMMU8
2853 <tr><td>QSYMMS16
2854 <tr><td>SIGNED32
2855 </table>
2856<tr>
2857 <td>CpuAcc
2858 <td>
2859 <ul>
2860 <li>All
2861 </ul>
2862 <td>
2863 <table>
2864 <tr><th>
2865 <tr><td>FLOAT16
2866 <tr><td>FLOAT32
2867 <tr><td>SIGNED32
2868 </table>
2869<tr>
2870 <td>GpuAcc
2871 <td>
2872 <ul>
2873 <li>All
2874 </ul>
2875 <td>
2876 <table>
2877 <tr><th>
2878 <tr><td>FLOAT16
2879 <tr><td>FLOAT32
2880 </table>
2881<tr>
2882 <td rowspan="3">ShapeLayer
2883 <td rowspan="3" style="width:200px;"> Layer to return the shape of the input tensor.
2884 <td rowspan="3">
2885 <ul>
2886 <li>N/A
2887 </ul>
2888 <td>CpuRef
2889 <td>
2890 <ul>
2891 <li>All
2892 </ul>
2893 <td>
2894 <table>
2895 <tr><th>
2896 <tr><td>All
2897 </table>
2898<tr>
2899 <td>CpuAcc
2900 <td>
2901 <ul>
2902 <li>All
2903 </ul>
2904 <td>
2905 <table>
2906 <tr><th>
2907 <tr><td>All
2908 </table>
2909<tr>
2910 <td>GpuAcc
2911 <td>
2912 <ul>
2913 <li>All
2914 </ul>
2915 <td>
2916 <table>
2917 <tr><th>
2918 <tr><td>All
2919 </table>
2920<tr>
2921 <td rowspan="3">SliceLayer
2922 <td rowspan="3" style="width:200px;"> Layer to perform tensor slicing.
2923 <td rowspan="3">
2924 <ul>
2925 <li>ANEURALNETWORKS_SLICE
2926 </ul>
2927 <td>CpuRef
2928 <td>
2929 <ul>
2930 <li>All
2931 </ul>
2932 <td>
2933 <table>
2934 <tr><th>
2935 <tr><td>BFLOAT16
2936 <tr><td>FLOAT32
2937 <tr><td>QASYMMS8
2938 <tr><td>QASYMMU8
2939 <tr><td>QSYMMS16
2940 </table>
2941<tr>
2942 <td>CpuAcc
2943 <td>
2944 <ul>
2945 <li>All
2946 </ul>
2947 <td>
2948 <table>
2949 <tr><th>
2950 <tr><td>All
2951 </table>
2952<tr>
2953 <td>GpuAcc
2954 <td>
2955 <ul>
2956 <li>All
2957 </ul>
2958 <td>
2959 <table>
2960 <tr><th>
2961 <tr><td>All
2962 </table>
2963<tr>
2964 <td rowspan="3">SoftmaxLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a softmax or log-softmax operation over the specified axis.
2966 <td rowspan="3">
2967 <ul>
2968 <li>ANEURALNETWORKS_LOG_SOFTMAX
2969 <li>ANEURALNETWORKS_SOFTMAX
2970 </ul>
2971 <td>CpuRef
2972 <td>
2973 <ul>
2974 <li>All
2975 </ul>
2976 <td>
2977 <table>
2978 <tr><th>
2979 <tr><td>BFLOAT16
2980 <tr><td>FLOAT16
2981 <tr><td>FLOAT32
2982 <tr><td>QASYMMS8
2983 <tr><td>QASYMMU8
2984 <tr><td>QSYMMS8
2985 <tr><td>QSYMMS16
2986 </table>
2987<tr>
2988 <td>CpuAcc
2989 <td>
2990 <ul>
2991 <li>All
2992 </ul>
2993 <td>
2994 <table>
2995 <tr><th>
2996 <tr><td>QASYMMU8
2997 <tr><td>QASYMMS8
2998 <tr><td>FLOAT16
2999 <tr><td>FLOAT32
3000 </table>
3001<tr>
3002 <td>GpuAcc
3003 <td>
3004 <ul>
3005 <li>All
3006 </ul>
3007 <td>
3008 <table>
3009 <tr><th>
3010 <tr><td>QASYMMU8
3011 <tr><td>QASYMMS8
3012 <tr><td>FLOAT16
3013 <tr><td>FLOAT32
3014 </table>
3015<tr>
3016 <td rowspan="3">SpaceToBatchNdLayer
    <td rowspan="3" style="width:200px;"> Layer to divide the spatial dimensions of a tensor into a grid of blocks and interleave these blocks with the batch dimension.
3018 <td rowspan="3">
3019 <ul>
3020 <li>ANEURALNETWORKS_SPACE_TO_BATCH_ND
3021 </ul>
3022 <td>CpuRef
3023 <td>
3024 <ul>
3025 <li>All
3026 </ul>
3027 <td>
3028 <table>
3029 <tr><th>
3030 <tr><td>BFLOAT16
3031 <tr><td>FLOAT16
3032 <tr><td>FLOAT32
3033 <tr><td>QASYMMS8
3034 <tr><td>QASYMMU8
3035 <tr><td>QSYMMS16
3036 </table>
3037<tr>
3038 <td>CpuAcc
3039 <td>
3040 <ul>
3041 <li>NHWC
3042 <li>NCHW
3043 </ul>
3044 <td>
3045 <table>
3046 <tr><th>
3047 <tr><td>All
3048 </table>
3049<tr>
3050 <td>GpuAcc
3051 <td>
3052 <ul>
3053 <li>NHWC
3054 <li>NCHW
3055 </ul>
3056 <td>
3057 <table>
3058 <tr><th>
3059 <tr><td>All
3060 </table>
3061<tr>
3062 <td rowspan="3">SpaceToDepthLayer
3063 <td rowspan="3" style="width:200px;"> Layer to rearrange blocks of spatial data into depth.
3064 <td rowspan="3">
3065 <ul>
3066 <li>ANEURALNETWORKS_SPACE_TO_DEPTH
3067 </ul>
3068 <td>CpuRef
3069 <td>
3070 <ul>
3071 <li>All
3072 </ul>
3073 <td>
3074 <table>
3075 <tr><th>
3076 <tr><td>BFLOAT16
3077 <tr><td>FLOAT16
3078 <tr><td>FLOAT32
3079 <tr><td>QASYMMS8
3080 <tr><td>QASYMMU8
3081 <tr><td>QSYMMS16
3082 </table>
3083<tr>
3084 <td>CpuAcc
3085 <td>
3086 <ul>
3087 <li>NHWC
3088 <li>NCHW
3089 </ul>
3090 <td>
3091 <table>
3092 <tr><th>
3093 <tr><td>All
3094 </table>
3095<tr>
3096 <td>GpuAcc
3097 <td>
3098 <ul>
3099 <li>NHWC
3100 <li>NCHW
3101 </ul>
3102 <td>
3103 <table>
3104 <tr><th>
3105 <tr><td>All
3106 </table>
3107<tr>
3108 <td rowspan="3">SplitterLayer
3109 <td rowspan="3" style="width:200px;"> Layer to split a tensor along a given axis.
3110 <td rowspan="3">
3111 <ul>
3112 <li>ANEURALNETWORKS_SPLIT
3113 </ul>
3114 <td>CpuRef
3115 <td>
3116 <ul>
3117 <li>All
3118 </ul>
3119 <td>
3120 <table>
3121 <tr><th>
3122 <tr><td>BFLOAT16
3123 <tr><td>FLOAT16
3124 <tr><td>FLOAT32
3125 <tr><td>QASYMMS8
3126 <tr><td>QASYMMU8
3127 <tr><td>QSYMMS16
3128 </table>
3129<tr>
3130 <td>CpuAcc
3131 <td>
3132 <ul>
3133 <li>All
3134 </ul>
3135 <td>
3136 <table>
3137 <tr><th>
3138 <tr><td>All
3139 </table>
3140<tr>
3141 <td>GpuAcc
3142 <td>
3143 <ul>
3144 <li>All
3145 </ul>
3146 <td>
3147 <table>
3148 <tr><th>
3149 <tr><td>All
3150 </table>
3151<tr>
3152 <td rowspan="3">StackLayer
3153 <td rowspan="3" style="width:200px;"> Layer to stack tensors along an axis.
3154 <td rowspan="3">
3155 <ul>
3156 <li>N/A
3157 </ul>
3158 <td>CpuRef
3159 <td>
3160 <ul>
3161 <li>All
3162 </ul>
3163 <td>
3164 <table>
3165 <tr><th>
3166 <tr><td>BFLOAT16
3167 <tr><td>FLOAT16
3168 <tr><td>FLOAT32
3169 <tr><td>QASYMMS8
3170 <tr><td>QASYMMU8
3171 <tr><td>QSYMMS16
3172 </table>
3173<tr>
3174 <td>CpuAcc
3175 <td>
3176 <ul>
3177 <li>All
3178 </ul>
3179 <td>
3180 <table>
3181 <tr><th>
3182 <tr><td>All
3183 </table>
3184<tr>
3185 <td>GpuAcc
3186 <td>
3187 <ul>
3188 <li>All
3189 </ul>
3190 <td>
3191 <table>
3192 <tr><th>
3193 <tr><td>All
3194 </table>
3195<tr>
3196 <td rowspan="1">StandInLayer
3197 <td rowspan="1" style="width:200px;"> A layer to represent "unknown" or "unsupported" operations in the input graph. It has a configurable number of input and output slots and an optional name.
3198 <td rowspan="1">
3199 <ul>
3200 <li>N/A
3201 </ul>
3202 <td>N/A
3203 <td>N/A
3204 <td>N/A
3205<tr>
3206 <td rowspan="3">StridedSliceLayer
3207 <td rowspan="3" style="width:200px;"> Layer to extract a strided slice of a tensor.
3208 <td rowspan="3">
3209 <ul>
3210 <li>ANEURALNETWORKS_STRIDED_SLICE
3211 </ul>
3212 <td>CpuRef
3213 <td>
3214 <ul>
3215 <li>All
3216 </ul>
3217 <td>
3218 <table>
3219 <tr><th>
3220 <tr><td>BFLOAT16
3221 <tr><td>FLOAT32
3222 <tr><td>QASYMMS8
3223 <tr><td>QASYMMU8
3224 <tr><td>QSYMMS16
3225 </table>
3226<tr>
3227 <td>CpuAcc
3228 <td>
3229 <ul>
3230 <li>All
3231 </ul>
3232 <td>
3233 <table>
3234 <tr><th>
3235 <tr><td>All
3236 </table>
3237<tr>
3238 <td>GpuAcc
3239 <td>
3240 <ul>
3241 <li>All
3242 </ul>
3243 <td>
3244 <table>
3245 <tr><th>
3246 <tr><td>All
3247 </table>
3248<tr>
3249 <td rowspan="3">SubtractionLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an elementwise subtraction of 2 tensors.
3251 <td rowspan="3">
3252 <ul>
3253 <li>ANEURALNETWORKS_SUB
3254 </ul>
3255 <td>CpuRef
3256 <td>
3257 <ul>
3258 <li>All
3259 </ul>
3260 <td>
3261 <table>
3262 <tr><th>
3263 <tr><td>BFLOAT16
3264 <tr><td>FLOAT16
3265 <tr><td>FLOAT32
3266 <tr><td>QASYMMS8
3267 <tr><td>QASYMMU8
3268 <tr><td>QSYMMS16
3269 <tr><td>SIGNED32
3270 </table>
3271<tr>
3272 <td>CpuAcc
3273 <td>
3274 <ul>
3275 <li>All
3276 </ul>
3277 <td>
3278 <table>
3279 <tr><th>
3280 <tr><td>QASYMMU8
3281 <tr><td>QASYMMS8
3282 <tr><td>QSYMMS16
3283 <tr><td>SIGNED32
3284 <tr><td>FLOAT16
3285 <tr><td>FLOAT32
3286 </table>
3287<tr>
3288 <td>GpuAcc
3289 <td>
3290 <ul>
3291 <li>All
3292 </ul>
3293 <td>
3294 <table>
3295 <tr><th>
3296 <tr><td>QASYMMU8
3297 <tr><td>QASYMMS8
3298 <tr><td>QSYMMS16
3299 <tr><td>SIGNED32
3300 <tr><td>FLOAT16
3301 <tr><td>FLOAT32
3302 </table>
3303<tr>
3304 <td rowspan="3">TransposeConvolution2dLayer
3305 <td rowspan="3" style="width:200px;"> Layer to perform 2D transpose convolution (deconvolution) operation.
3306 <td rowspan="3">
3307 <ul>
3308 <li>ANEURALNETWORKS_TRANSPOSE_CONV_2D
3309 </ul>
3310 <td>CpuRef
3311 <td>
3312 <ul>
3313 <li>All
3314 </ul>
3315 <td>
3316 <table>
3317 <tr><th>
3318 <tr><td>BFLOAT16
3319 <tr><td>FLOAT16
3320 <tr><td>FLOAT32
3321 <tr><td>QASYMMS8
3322 <tr><td>QASYMMU8
3323 <tr><td>QSYMMS8
3324 <tr><td>QSYMMS16
3325 </table>
3326<tr>
3327 <td>CpuAcc
3328 <td>
3329 <ul>
3330 <li>NHWC
3331 <li>NCHW
3332 </ul>
3333 <td>
3334 <table>
3335 <tr><th>
3336 <tr><td>SIGNED32
3337 <tr><td>FLOAT16
3338 <tr><td>FLOAT32
3339 <tr><td>QASYMMU8
3340 <tr><td>QASYMMS8
3341 <tr><td>QUANTIZEDSYMM8PERAXIS
3342 </table>
3343<tr>
3344 <td>GpuAcc
3345 <td>
3346 <ul>
3347 <li>NHWC
3348 <li>NCHW
3349 </ul>
3350 <td>
3351 <table>
3352 <tr><th>
3353 <tr><td>SIGNED32
3354 <tr><td>FLOAT16
3355 <tr><td>FLOAT32
3356 <tr><td>QASYMMU8
3357 <tr><td>QASYMMS8
3358 <tr><td>QUANTIZEDSYMM8PERAXIS
3359 </table>
3360<tr>
3361 <td rowspan="3">TransposeLayer
3362 <td rowspan="3" style="width:200px;"> Layer to transpose a tensor.
3363 <td rowspan="3">
3364 <ul>
3365 <li>ANEURALNETWORKS_TRANSPOSE
3366 </ul>
3367 <td>CpuRef
3368 <td>
3369 <ul>
3370 <li>All
3371 </ul>
3372 <td>
3373 <table>
3374 <tr><th>
3375 <tr><td>BFLOAT16
3376 <tr><td>FLOAT16
3377 <tr><td>FLOAT32
3378 <tr><td>QASYMMS8
3379 <tr><td>QASYMMU8
3380 <tr><td>QSYMMS16
3381 </table>
3382<tr>
3383 <td>CpuAcc
3384 <td>
3385 <ul>
3386 <li>All
3387 </ul>
3388 <td>
3389 <table>
3390 <tr><th>
3391 <tr><td>All
3392 </table>
3393<tr>
3394 <td>GpuAcc
3395 <td>
3396 <ul>
3397 <li>All
3398 </ul>
3399 <td>
3400 <table>
3401 <tr><th>
3402 <tr><td>All
3403 </table>
3404<tr>
    <td rowspan="3">UnidirectionalSequenceLstmLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a unidirectional sequence LSTM operation.
    <td rowspan="3">
3408 <ul>
3409 <li>ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM
3410 </ul>
3411 <td>CpuRef
3412 <td>
3413 <ul>
3414 <li>All
3415 </ul>
3416 <td>
3417 <table>
    <tr><th>Input Types
    <tr><td>FLOAT32
    </table>
    <table>
    <tr><th>Weight Types
    <tr><td>FLOAT32
    <tr><td>QASYMMS8
    </table>
    <td>CpuAcc
3427 <td>
3428 <ul>
3429 <li>All
3430 </ul>
3431 <td>
3432 <table>
3433 <tr><th>Input Types
3434 <tr><td>FLOAT32
3435 </table>
3436 <table>
3437 <tr><th>Weight Types
3438 <tr><td>FLOAT32
3439 </table>
    <td>GpuAcc
3441 <td>
3442 <ul>
3443 <li>All
3444 </ul>
3445 <td>
3446 <table>
3447 <tr><th>Input Types
3448 <tr><td>FLOAT32
3449 </table>
3450 <table>
3451 <tr><th>Weight Types
3452 <tr><td>FLOAT32
3453 </table>
<tr>
3455 <td rowspan="3">UnmapLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an unmap operation on a tensor.
3457 <td rowspan="3">
3458 <ul>
3459 <li>N/A
3460 </ul>
3461 <td>CpuRef
3462 <td>
3463 <ul>
3464 <li>All
3465 </ul>
3466 <td>
3467 <table>
3468 <tr><th>
3469 <tr><td>All
3470 </table>
3471<tr>
3472 <td>CpuAcc
3473 <td>
3474 <ul>
3475 <li>NHWC
3476 <li>NCHW
3477 </ul>
3478 <td>
3479 <table>
3480 <tr><th>
3481 <tr><td>All
3482 </table>
3483<tr>
3484 <td>GpuAcc
3485 <td>
3486 <ul>
3487 <li>NHWC
3488 <li>NCHW
3489 </ul>
3490 <td>
3491 <table>
3492 <tr><th>
3493 <tr><td>All
3494 </table>
3495</table>
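
The Backends column above is what backend assignment is checked against at optimization time: broadly, each
layer is placed on the first backend in the caller's preference list that supports it. A minimal sketch of
expressing that preference (error handling omitted; the network is assumed to be populated elsewhere):

@code{.cpp}
#include <armnn/ArmNN.hpp>
#include <vector>

// Create the runtime and a network.
armnn::IRuntime::CreationOptions options;
armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
armnn::INetworkPtr network = armnn::INetwork::Create();
// ... add layers to 'network' ...

// Preference order: try GpuAcc first, fall back to CpuAcc, then CpuRef.
std::vector<armnn::BackendId> backends = { "GpuAcc", "CpuAcc", "CpuRef" };
armnn::IOptimizedNetworkPtr optNet =
    armnn::Optimize(*network, backends, runtime->GetDeviceSpec());
@endcode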

*/
} // namespace armnn