blob: a37b6fa3a7b66ec7959a8cbd8499c7e65257a6fa [file] [log] [blame]
Teresa Charlin1fe6c812022-11-01 15:59:50 +00001/// Copyright (c) 2021, 2023 ARM Limited and Contributors. All rights reserved.
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01002///
3/// SPDX-License-Identifier: MIT
4///
5
6namespace armnn
7{
8/**
9@page operator_list Arm NN Operators
10
Sadik Armagan1a9c9f62021-08-05 09:25:15 +010011
12@section S5_1_operator_list Arm NN Operators
13
14Arm NN supports operators that are listed in below table.
15
16Arm NN supports a wide list of data-types.
17The main data-types that the Machine Learning functions support are the following:
18 <ul>
19 <li><b>BFLOAT16:</b> 16-bit non-standard brain floating point
20 <li><b>QASYMMU8:</b> 8-bit unsigned asymmetric quantized
21 <li><b>QASYMMS8:</b> 8-bit signed asymmetric quantized
22 <li><b>QUANTIZEDSYMM8PERAXIS:</b> 8-bit signed symmetric quantized
Cathal Corbettb85113e2022-02-22 11:51:43 +000023 <li><b>QSYMMS8:</b> 8-bit signed symmetric quantized
24 <li><b>QSYMMS16:</b> 16-bit signed symmetric quantized
Sadik Armagan1a9c9f62021-08-05 09:25:15 +010025 <li><b>FLOAT32:</b> 32-bit single precision floating point
26 <li><b>FLOAT16:</b> 16-bit half precision floating point
27 <li><b>SIGNED32:</b> 32-bit signed integer
28 <li><b>BOOLEAN:</b> 8-bit unsigned char
29 <li><b>All:</b> Agnostic to any specific data type
30 </ul>
31
32Arm NN supports the following data layouts (fast changing dimension from right to left):
33 <ul>
34 <li><b>NHWC:</b> Layout where channels are in the fastest changing dimension
35 <li><b>NCHW:</b> Layout where width is in the fastest changing dimension
36 <li><b>All:</b> Agnostic to any specific data layout
37 </ul>
38where N = batches, C = channels, H = height, W = width
39
40<table>
41<caption id="multi_row"></caption>
42<tr>
43 <th>Operator
44 <th>Description
45 <th>Equivalent Android NNAPI Operator
46 <th>Backends
47 <th>Data Layouts
48 <th>Data Types
49<tr>
50 <td rowspan="3">AbsLayer
51 <td rowspan="3"> Layer to perform absolute operation.
52 <td rowspan="3">
53 <ul>
54 <li>ANEURALNETWORKS_ABS
55 </ul>
56 <td>CpuRef
57 <td>
58 <ul>
59 <li>All
60 </ul>
61 <td>
62 <table>
63 <tr><th>
64 <tr><td>BFLOAT16
65 <tr><td>FLOAT16
66 <tr><td>FLOAT32
67 <tr><td>QASYMMS8
68 <tr><td>QASYMMU8
69 <tr><td>QSYMMS16
70 <tr><td>SIGNED32
71 </table>
72<tr>
73 <td>CpuAcc
74 <td>
75 <ul>
76 <li>All
77 </ul>
78 <td>
79 <table>
80 <tr><th>
81 <tr><td>FLOAT16
82 <tr><td>FLOAT32
83 <tr><td>SIGNED32
84 </table>
85<tr>
86 <td>GpuAcc
87 <td>
88 <ul>
89 <li>All
90 </ul>
91 <td>
92 <table>
93 <tr><th>
94 <tr><td>FLOAT16
95 <tr><td>FLOAT32
96 </table>
97<tr>
98 <td rowspan="3">ActivationLayer
99 <td rowspan="3" style="width:200px;"> Layer to simulate an activation layer with the specified activation function.
100 <td rowspan="3">
101 <ul>
102 <li>ANEURALNETWORKS_ABS
103 <li>ANEURALNETWORKS_ELU
104 <li>ANEURALNETWORKS_HARD_SWISH
105 <li>ANEURALNETWORKS_LOGISTIC
106 <li>ANEURALNETWORKS_PRELU
107 <li>ANEURALNETWORKS_RELU
108 <li>ANEURALNETWORKS_RELU1
109 <li>ANEURALNETWORKS_RELU6
110 <li>ANEURALNETWORKS_SQRT
111 <li>ANEURALNETWORKS_TANH
112 </ul>
113 <td>CpuRef
114 <td>
115 <ul>
116 <li>All
117 </ul>
118 <td>
119 <table>
120 <tr><th>
121 <tr><td>BFLOAT16
122 <tr><td>FLOAT16
123 <tr><td>FLOAT32
124 <tr><td>QASYMMS8
125 <tr><td>QASYMMU8
126 <tr><td>QSYMMS16
127 </table>
128<tr>
129 <td>CpuAcc
130 <td>
131 <ul>
132 <li>All
133 </ul>
134 <td>
135 <table>
136 <tr><th>
137 <tr><td>QASYMMU8
138 <tr><td>QASYMMS8
139 <tr><td>QSYMMS16
140 <tr><td>FLOAT16
141 <tr><td>FLOAT32
142 </table>
143<tr>
144 <td>GpuAcc
145 <td>
146 <ul>
147 <li>All
148 </ul>
149 <td>
150 <table>
151 <tr><th>
152 <tr><td>QASYMMU8
153 <tr><td>QASYMMS8
154 <tr><td>QSYMMS16
155 <tr><td>FLOAT16
156 <tr><td>FLOAT32
157 </table>
158<tr>
159 <td rowspan="3">AdditionLayer
160 <td rowspan="3" style="width:200px;"> Layer to add 2 tensors.
161 <td rowspan="3">
162 <ul>
163 <li>ANEURALNETWORKS_ADD
164 </ul>
165 <td>CpuRef
166 <td>
167 <ul>
168 <li>All
169 </ul>
170 <td>
171 <table>
172 <tr><th>
173 <tr><td>BFLOAT16
174 <tr><td>FLOAT16
175 <tr><td>FLOAT32
176 <tr><td>QASYMMS8
177 <tr><td>QASYMMU8
178 <tr><td>QSYMMS16
179 <tr><td>SIGNED32
180 </table>
181<tr>
182 <td>CpuAcc
183 <td>
184 <ul>
185 <li>All
186 </ul>
187 <td>
188 <table>
189 <tr><th>
190 <tr><td>QASYMMU8
191 <tr><td>QASYMMS8
192 <tr><td>QSYMMS16
193 <tr><td>SIGNED32
194 <tr><td>FLOAT16
195 <tr><td>FLOAT32
196 </table>
197<tr>
198 <td>GpuAcc
199 <td>
200 <ul>
201 <li>All
202 </ul>
203 <td>
204 <table>
205 <tr><th>
206 <tr><td>QASYMMU8
207 <tr><td>QASYMMS8
208 <tr><td>QSYMMS16
209 <tr><td>SIGNED32
210 <tr><td>FLOAT16
211 <tr><td>FLOAT32
212 </table>
213<tr>
214 <td rowspan="3">ArgMinMaxLayer
215 <td rowspan="3" style="width:200px;"> Layer to calculate the index of the minimum or maximum values in a tensor
216 based on an axis.
217 <td rowspan="3">
218 <ul>
219 <li>ANEURALNETWORKS_ARGMAX
220 <li>ANEURALNETWORKS_ARGMIN
221 </ul>
222 <td>CpuRef
223 <td>
224 <ul>
225 <li>All
226 </ul>
227 <td>
228 <table>
229 <tr><th>
230 <tr><td>BFLOAT16
231 <tr><td>FLOAT16
232 <tr><td>FLOAT32
233 <tr><td>QASYMMS8
234 <tr><td>QASYMMU8
235 <tr><td>QSYMMS16
236 <tr><td>SIGNED32
237 <tr><td>SIGNED64
238 </table>
239<tr>
240 <td>CpuAcc
241 <td>
242 <ul>
243 <li>All
244 </ul>
245 <td>
246 <table>
247 <tr><th>
248 <tr><td>QASYMMU8
249 <tr><td>QASYMMS8
250 <tr><td>SIGNED32
251 <tr><td>FLOAT16
252 <tr><td>FLOAT32
253 </table>
254<tr>
255 <td>GpuAcc
256 <td>
257 <ul>
258 <li>All
259 </ul>
260 <td>
261 <table>
262 <tr><th>
263 <tr><td>QASYMMU8
264 <tr><td>QASYMMS8
265 <tr><td>SIGNED32
266 <tr><td>FLOAT16
267 <tr><td>FLOAT32
268 </table>
269<tr>
Samuel Yap6b478092022-07-06 15:36:03 +0100270 <td rowspan="3">BatchMatMulLayer
271 <td rowspan="3" style="width:200px;"> Layer to perform batch matrix multiplication.
272 <td rowspan="3">
273 <ul>
274 <li>N/A
275 </ul>
276 <td>CpuRef
277 <td>
278 <ul>
279 <li>All
280 </ul>
281 <td>
282 <table>
283 <tr><th>
284 <tr><td>BFLOAT16
285 <tr><td>FLOAT16
286 <tr><td>FLOAT32
287 <tr><td>QASYMMS8
288 <tr><td>QASYMMU8
289 <tr><td>QSYMMS16
290 </table>
291<tr>
292 <td>CpuAcc
293 <td>
294 <ul>
Teresa Charlin0f86ecf2022-10-13 15:47:08 +0100295 <li>All
Samuel Yap6b478092022-07-06 15:36:03 +0100296 </ul>
297 <td>
Teresa Charlin0f86ecf2022-10-13 15:47:08 +0100298 <table>
299 <tr><th>
300 <tr><td>FLOAT32
Teresa Charlin1fe6c812022-11-01 15:59:50 +0000301 <tr><td>QASYMMS8
Teresa Charlin0f86ecf2022-10-13 15:47:08 +0100302 </table>
Samuel Yap6b478092022-07-06 15:36:03 +0100303<tr>
304 <td>GpuAcc
305 <td>
306 <ul>
Teresa Charlin94916a52022-10-19 08:48:07 +0100307 <li>All
Samuel Yap6b478092022-07-06 15:36:03 +0100308 </ul>
309 <td>
Teresa Charlin94916a52022-10-19 08:48:07 +0100310 <table>
311 <tr><th>
312 <tr><td>FLOAT32
Teresa Charlin97a3aef2023-01-10 10:32:51 +0000313 <tr><td>QASYMMS8
Teresa Charlin94916a52022-10-19 08:48:07 +0100314 </table>
Samuel Yap6b478092022-07-06 15:36:03 +0100315<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +0100316 <td rowspan="3">BatchNormalizationLayer
317 <td rowspan="3" style="width:200px;"> Layer to perform batch normalization.
318 <td rowspan="3">
319 <ul>
320 <li>N/A
321 </ul>
322 <td>CpuRef
323 <td>
324 <ul>
325 <li>All
326 </ul>
327 <td>
328 <table>
329 <tr><th>
330 <tr><td>BFLOAT16
331 <tr><td>FLOAT16
332 <tr><td>FLOAT32
333 <tr><td>QASYMMS8
334 <tr><td>QASYMMU8
335 <tr><td>QSYMMS16
336 </table>
337<tr>
338 <td>CpuAcc
339 <td>
340 <ul>
341 <li>NHWC
342 <li>NCHW
343 </ul>
344 <td>
345 <table>
346 <tr><th>
347 <tr><td>FLOAT32
348 <tr><td>FLOAT16
349 </table>
350<tr>
351 <td>GpuAcc
352 <td>
353 <ul>
354 <li>NHWC
355 <li>NCHW
356 </ul>
357 <td>
358 <table>
359 <tr><th>
360 <tr><td>FLOAT32
361 <tr><td>FLOAT16
362 </table>
363<tr>
364 <td rowspan="3">BatchToSpaceNdLayer
365 <td rowspan="3" style="width:200px;"> Layer to perform a batch to space transformation.
366 <td rowspan="3">
367 <ul>
368 <li>ANEURALNETWORKS_BATCH_TO_SPACE_ND
369 </ul>
370 <td>CpuRef
371 <td>
372 <ul>
373 <li>All
374 </ul>
375 <td>
376 <table>
377 <tr><th>
378 <tr><td>BFLOAT16
379 <tr><td>FLOAT16
380 <tr><td>FLOAT32
381 <tr><td>QASYMMS8
382 <tr><td>QASYMMU8
383 <tr><td>QSYMMS16
384 </table>
385<tr>
386 <td>CpuAcc
387 <td>
388 <ul>
389 <li>NHWC
390 <li>NCHW
391 </ul>
392 <td>
393 <table>
394 <tr><th>
395 <tr><td>All
396 </table>
397<tr>
398 <td>GpuAcc
399 <td>
400 <ul>
401 <li>NHWC
402 <li>NCHW
403 </ul>
404 <td>
405 <table>
406 <tr><th>
407 <tr><td>All
408 </table>
409<tr>
410 <td rowspan="3">CastLayer
411 <td rowspan="3" style="width:200px;"> Layer to cast a tensor to a type.
412 <td rowspan="3">
413 <ul>
414 <li>ANEURALNETWORKS_CAST
415 </ul>
416 <td>CpuRef
417 <td>
418 <ul>
419 <li>All
420 </ul>
421 <td>
422 <table>
423 <tr><th>
424 <tr><td>BFLOAT16
425 <tr><td>FLOAT16
426 <tr><td>FLOAT32
427 <tr><td>QSYMMS8
428 <tr><td>QASYMMS8
429 <tr><td>QASYMMU8
430 <tr><td>QSYMMS16
431 <tr><td>SIGNED32
432 </table>
433<tr>
434 <td>CpuAcc
435 <td>
436 <ul>
437 <li>All
438 </ul>
439 <td>
440 <table>
441 <tr><th>
442 <tr><td>QASYMMS8
443 <tr><td>QASYMMU8
444 <tr><td>FLOAT16
445 <tr><td>SIGNED32
446 <tr><td>FLOAT32
447 </table>
448<tr>
449 <td>GpuAcc
450 <td>
451 <ul>
452 <li>All
453 </ul>
454 <td>
455 <table>
456 <tr><th>
457 <tr><td>QASYMMS8
458 <tr><td>QASYMMU8
459 <tr><td>SIGNED32
460 <tr><td>FLOAT16
461 <tr><td>FLOAT32
462 </table>
463<tr>
Teresa Charlincd203852021-09-24 18:15:39 +0100464 <td rowspan="3">ChannelShuffleLayer
465 <td rowspan="3" style="width:200px;"> Layer to reorganize the channels of a tensor.
466 <td rowspan="3">
467 <ul>
468 <li>ANEURALNETWORKS_CHANNEL_SHUFFLE
469 </ul>
470 <td>CpuRef
471 <td>
472 <ul>
473 <li>All
474 </ul>
475 <td>
476 <table>
477 <tr><th>
478 <tr><td>FLOAT16
479 <tr><td>FLOAT32
480 <tr><td>QSYMMS8
481 <tr><td>QASYMMS8
482 <tr><td>QASYMMU8
483 </table>
484<tr>
485 <td>CpuAcc
486 <td>
487 <ul>
488 <li>All
489 </ul>
490 <td>
491 <table>
492 <tr><th>
493 <tr><td>QASYMMS8
494 <tr><td>QASYMMU8
495 <tr><td>FLOAT16
496 <tr><td>FLOAT32
497 </table>
498<tr>
499 <td>GpuAcc
500 <td>
501 <ul>
502 <li>All
503 </ul>
504 <td>
505 <table>
506 <tr><th>
507 <tr><td>QASYMMS8
508 <tr><td>QASYMMU8
509 <tr><td>FLOAT16
510 <tr><td>FLOAT32
511 </table>
512<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +0100513 <td rowspan="3">ComparisonLayer
514 <td rowspan="3" style="width:200px;"> Layer to compare 2 tensors.
515 <td rowspan="3">
516 <ul>
517 <li>ANEURALNETWORKS_EQUAL
518 <li>ANEURALNETWORKS_GREATER
519 <li>ANEURALNETWORKS_GREATER_EQUAL
520 <li>ANEURALNETWORKS_LESS
521 <li>ANEURALNETWORKS_LESS_EQUAL
522 <li>ANEURALNETWORKS_NOT_EQUAL
523 </ul>
524 <td>CpuRef
525 <td>
526 <ul>
527 <li>All
528 </ul>
529 <td>
530 <table>
531 <tr><th>
532 <tr><td>BFLOAT16
533 <tr><td>FLOAT16
534 <tr><td>FLOAT32
535 <tr><td>BOOLEAN
536 <tr><td>QASYMMS8
537 <tr><td>QASYMMU8
538 <tr><td>QSYMMS16
539 <tr><td>SIGNED32
540 </table>
541<tr>
542 <td>CpuAcc
543 <td>
544 <ul>
545 <li>All
546 </ul>
547 <td>
548 <table>
549 <tr><th>
550 <tr><td>All
551 </table>
552<tr>
553 <td>GpuAcc
554 <td>
555 <ul>
556 <li>All
557 </ul>
558 <td>
559 <table>
560 <tr><th>
561 <tr><td>All
562 </table>
563<tr>
564 <td rowspan="3">ConcatLayer
565 <td rowspan="3" style="width:200px;"> Layer to concatenate tensors along a given axis.
566 <td rowspan="3">
567 <ul>
568 <li>ANEURALNETWORKS_CONCATENATION
569 </ul>
570 <td>CpuRef
571 <td>
572 <ul>
573 <li>All
574 </ul>
575 <td>
576 <table>
577 <tr><th>
578 <tr><td>BFLOAT16
579 <tr><td>FLOAT16
580 <tr><td>FLOAT32
581 <tr><td>QASYMMS8
582 <tr><td>QASYMMU8
583 <tr><td>QSYMMS16
584 </table>
585<tr>
586 <td>CpuAcc
587 <td>
588 <ul>
589 <li>All
590 </ul>
591 <td>
592 <table>
593 <tr><th>
594 <tr><td>QASYMMU8
595 <tr><td>QASYMMS8
596 <tr><td>FLOAT16
597 <tr><td>FLOAT32
598 </table>
599<tr>
600 <td>GpuAcc
601 <td>
602 <ul>
603 <li>All
604 </ul>
605 <td>
606 <table>
607 <tr><th>
608 <tr><td>QASYMMU8
609 <tr><td>QASYMMS8
610 <tr><td>FLOAT16
611 <tr><td>FLOAT32
612 </table>
613<tr>
614 <td rowspan="3">ConstantLayer
615 <td rowspan="3" style="width:200px;"> Layer to provide a constant tensor.
616 <td rowspan="3">
617 <ul>
618 <li>N/A
619 </ul>
620 <td>CpuRef
621 <td>
622 <ul>
623 <li>All
624 </ul>
625 <td>
626 <table>
627 <tr><th>
628 <tr><td>BFLOAT16
629 <tr><td>FLOAT16
630 <tr><td>FLOAT32
631 <tr><td>QASYMMS8
632 <tr><td>QASYMMU8
633 <tr><td>QSYMMS8
634 <tr><td>QSYMMS16
635 <tr><td>SIGNED32
636 </table>
637<tr>
638 <td>CpuAcc
639 <td>
640 <ul>
641 <li>All
642 </ul>
643 <td>
644 <table>
645 <tr><th>
646 <tr><td>All
647 </table>
648<tr>
649 <td>GpuAcc
650 <td>
651 <ul>
652 <li>All
653 </ul>
654 <td>
655 <table>
656 <tr><th>
657 <tr><td>All
658 </table>
659<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +0100660 <td rowspan="3">ConvertFp16ToFp32Layer
661 <td rowspan="3" style="width:200px;"> Layer to convert Float16 tensor to Float32 tensor.
662 <td rowspan="3">
663 <ul>
664 <li>N/A
665 </ul>
666 <td>CpuRef
667 <td>
668 <ul>
669 <li>All
670 </ul>
671 <td>
672 <table>
673 <tr><th>
674 <tr><td>FLOAT16
675 <tr><td>FLOAT32
676 </table>
677<tr>
678 <td>CpuAcc
679 <td>
680 <ul>
681 <li>All
682 </ul>
683 <td>
684 <table>
685 <tr><th>
686 <tr><td>FLOAT16
687 <tr><td>FLOAT32
688 </table>
689<tr>
690 <td>GpuAcc
691 <td>
692 <ul>
693 <li>All
694 </ul>
695 <td>
696 <table>
697 <tr><th>
698 <tr><td>FLOAT16
699 <tr><td>FLOAT32
700 </table>
701<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +0100702 <td rowspan="3">ConvertFp32ToFp16Layer
703 <td rowspan="3" style="width:200px;"> Layer to convert Float32 tensor to Float16 tensor.
704 <td rowspan="3">
705 <ul>
706 <li>N/A
707 </ul>
708 <td>CpuRef
709 <td>
710 <ul>
711 <li>All
712 </ul>
713 <td>
714 <table>
715 <tr><th>
716 <tr><td>FLOAT16
717 <tr><td>FLOAT32
718 </table>
719<tr>
720 <td>CpuAcc
721 <td>
722 <ul>
723 <li>All
724 </ul>
725 <td>
726 <table>
727 <tr><th>
728 <tr><td>FLOAT16
729 <tr><td>FLOAT32
730 </table>
731<tr>
732 <td>GpuAcc
733 <td>
734 <ul>
735 <li>All
736 </ul>
737 <td>
738 <table>
739 <tr><th>
740 <tr><td>FLOAT16
741 <tr><td>FLOAT32
742 </table>
743<tr>
744 <td rowspan="3">Convolution2dLayer
745 <td rowspan="3" style="width:200px;"> Layer to compute a convolution operation.
746 <td rowspan="3">
747 <ul>
748 <li>ANEURALNETWORKS_CONV_2D
749 <li>ANEURALNETWORKS_GROUPED_CONV_2D
750 </ul>
751 <td>CpuRef
752 <td>
753 <ul>
754 <li>All
755 </ul>
756 <td>
757 <table>
758 <tr><th>
759 <tr><td>BFLOAT16
760 <tr><td>FLOAT16
761 <tr><td>FLOAT32
762 <tr><td>QASYMMS8
763 <tr><td>QASYMMU8
764 <tr><td>QSYMMS16
765 </table>
766<tr>
767 <td>CpuAcc
768 <td>
769 <ul>
770 <li>NHWC
771 <li>NCHW
772 </ul>
773 <td>
774 <table>
775 <tr><th>
776 <tr><td>SIGNED32
777 <tr><td>FLOAT16
778 <tr><td>FLOAT32
779 <tr><td>QASYMMU8
780 <tr><td>QASYMMS8
781 <tr><td>QUANTIZEDSYMM8PERAXIS
782 </table>
783<tr>
784 <td>GpuAcc
785 <td>
786 <ul>
787 <li>NHWC
788 <li>NCHW
789 </ul>
790 <td>
791 <table>
792 <tr><th>
793 <tr><td>SIGNED32
794 <tr><td>FLOAT16
795 <tr><td>FLOAT32
796 <tr><td>QASYMMU8
797 <tr><td>QASYMMS8
798 <tr><td>QUANTIZEDSYMM8PERAXIS
799 </table>
800<tr>
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100801 <td rowspan="3">Convolution3dLayer
802 <td rowspan="3" style="width:200px;"> Layer to compute a 3D convolution operation.
803 <td rowspan="3">
804 <ul>
805 <li>N/A
806 </ul>
807 <td>CpuRef
808 <td>
809 <ul>
810 <li>NDHWC
811 </ul>
812 <td>
813 <table>
814 <tr><th>
815 <tr><td>BFLOAT16
816 <tr><td>FLOAT16
817 <tr><td>FLOAT32
818 <tr><td>QASYMMS8
819 <tr><td>QASYMMU8
820 <tr><td>QSYMMS8
821 <tr><td>QSYMMS16
822 </table>
823<tr>
824 <td>CpuAcc
825 <td>
826 <ul>
827 <li>N/A
828 </ul>
829 <td>
830 <ul>
831 <li>N/A
832 </ul>
833<tr>
834 <td>GpuAcc
835 <td>
836 <ul>
837 <li>N/A
838 </ul>
839 <td>
840 <ul>
841 <li>N/A
842 </ul>
843<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +0100844 <td rowspan="1">DebugLayer
845 <td rowspan="1" style="width:200px;"> Layer to print out inter layer tensor information.
846 <td rowspan="1">
847 <ul>
848 <li>N/A
849 </ul>
850 <td>CpuRef
851 <td>
852 <ul>
853 <li>All
854 </ul>
855 <td>
856 <table>
857 <tr><th>
858 <tr><td>BFLOAT16
859 <tr><td>FLOAT16
860 <tr><td>FLOAT32
861 <tr><td>QASYMMS8
862 <tr><td>QASYMMU8
863 <tr><td>QSYMMS8
864 <tr><td>QSYMMS16
865 <tr><td>SIGNED32
866 </table>
867<tr>
868 <td rowspan="3">DepthToSpaceLayer
869 <td rowspan="3" style="width:200px;"> Layer to perform Depth to Space transformation.
870 <td rowspan="3">
871 <ul>
872 <li>ANEURALNETWORKS_DEPTH_TO_SPACE
873 </ul>
874 <td>CpuRef
875 <td>
876 <ul>
877 <li>All
878 </ul>
879 <td>
880 <table>
881 <tr><th>
882 <tr><td>BFLOAT16
883 <tr><td>FLOAT16
884 <tr><td>FLOAT32
885 <tr><td>QASYMMS8
886 <tr><td>QASYMMU8
887 <tr><td>QSYMMS16
888 </table>
889<tr>
890 <td>CpuAcc
891 <td>
892 <ul>
893 <li>NHWC
894 <li>NCHW
895 </ul>
896 <td>
897 <table>
898 <tr><th>
899 <tr><td>All
900 </table>
901<tr>
902 <td>GpuAcc
903 <td>
904 <ul>
905 <li>NHWC
906 <li>NCHW
907 </ul>
908 <td>
909 <table>
910 <tr><th>
911 <tr><td>All
912 </table>
913<tr>
914 <td rowspan="3">DepthwiseConvolution2dLayer
915 <td rowspan="3" style="width:200px;"> Layer to compute a deconvolution or transpose convolution.
916 <td rowspan="3">
917 <ul>
918 <li>ANEURALNETWORKS_DEPTHWISE_CONV_2D
919 </ul>
920 <td>CpuRef
921 <td>
922 <ul>
923 <li>All
924 </ul>
925 <td>
926 <table>
927 <tr><th>
928 <tr><td>BFLOAT16
929 <tr><td>FLOAT16
930 <tr><td>FLOAT32
931 <tr><td>QASYMMS8
932 <tr><td>QASYMMU8
933 <tr><td>QSYMMS8
934 <tr><td>QSYMMS16
935 </table>
936<tr>
937 <td>CpuAcc
938 <td>
939 <ul>
940 <li>NHWC
941 <li>NCHW
942 </ul>
943 <td>
944 <table>
945 <tr><th>
946 <tr><td>FLOAT16
947 <tr><td>FLOAT32
948 <tr><td>SIGNED32
949 <tr><td>QASYMMU8
950 <tr><td>QASYMMS8
951 <tr><td>QUANTIZEDSYMM8PERAXIS
952 </table>
953<tr>
954 <td>GpuAcc
955 <td>
956 <ul>
957 <li>NHWC
958 <li>NCHW
959 </ul>
960 <td>
961 <table>
962 <tr><th>
963 <tr><td>FLOAT16
964 <tr><td>FLOAT32
965 <tr><td>SIGNED32
966 <tr><td>QASYMMU8
967 <tr><td>QASYMMS8
968 <tr><td>QUANTIZEDSYMM8PERAXIS
969 </table>
970<tr>
971 <td rowspan="3">DequantizeLayer
972 <td rowspan="3" style="width:200px;"> Layer to dequantize the values in a tensor.
973 <td rowspan="3">
974 <ul>
975 <li>ANEURALNETWORKS_DEQUANTIZE
976 </ul>
977 <td>CpuRef
978 <td>
979 <ul>
980 <li>All
981 </ul>
982 <td>
983 <table>
984 <tr><th>
985 <tr><td>QASYMMS8
986 <tr><td>QASYMMU8
987 <tr><td>QSYMMS8
988 <tr><td>QSYMMS16
989 </table>
990<tr>
991 <td>CpuAcc
992 <td>
993 <ul>
994 <li>All
995 </ul>
996 <td>
997 <table>
998 <tr><th>
999 <tr><td>FLOAT16
1000 <tr><td>FLOAT32
1001 <tr><td>QASYMMU8
1002 <tr><td>QASYMMS8
1003 <tr><td>QUANTIZEDSYMM8PERAXIS
1004 <tr><td>QSYMMS8
1005 <tr><td>QSYMMS16
1006 </table>
1007<tr>
1008 <td>GpuAcc
1009 <td>
1010 <ul>
1011 <li>All
1012 </ul>
1013 <td>
1014 <table>
1015 <tr><th>
1016 <tr><td>FLOAT16
1017 <tr><td>FLOAT32
1018 <tr><td>QASYMMU8
1019 <tr><td>QASYMMS8
1020 <tr><td>QUANTIZEDSYMM8PERAXIS
1021 <tr><td>QSYMMS8
1022 <tr><td>QSYMMS16
1023 </table>
1024<tr>
1025 <td rowspan="2">DetectionPostProcessLayer
1026 <td rowspan="2" style="width:200px;"> Layer to generate the detection output based on center size encoded boxes, class prediction and anchors by doing non maximum suppression (NMS).
1027 <td rowspan="2">
1028 <ul>
1029 <li>ANEURALNETWORKS_DETECTION_POSTPROCESSING
1030 </ul>
1031 <td>CpuRef
1032 <td>
1033 <ul>
1034 <li>All
1035 </ul>
1036 <td>
1037 <table>
1038 <tr><th>
1039 <tr><td>BFLOAT16
1040 <tr><td>FLOAT16
1041 <tr><td>FLOAT32
1042 <tr><td>QASYMMS8
1043 <tr><td>QASYMMU8
1044 <tr><td>QSYMMS16
1045 </table>
1046<tr>
1047 <td>CpuAcc
1048 <td>
1049 <ul>
1050 <li>All
1051 </ul>
1052 <td>
1053 <table>
1054 <tr><th>
1055 <tr><td>QASYMMU8
1056 <tr><td>QASYMMS8
1057 <tr><td>FLOAT32
1058 </table>
1059<tr>
1060 <td rowspan="3">DivisionLayer
1061 <td rowspan="3" style="width:200px;"> Layer to divide 2 tensors.
1062 <td rowspan="3">
1063 <ul>
1064 <li>ANEURALNETWORKS_DIV
1065 </ul>
1066 <td>CpuRef
1067 <td>
1068 <ul>
1069 <li>All
1070 </ul>
1071 <td>
1072 <table>
1073 <tr><th>
1074 <tr><td>BFLOAT16
1075 <tr><td>FLOAT16
1076 <tr><td>FLOAT32
1077 <tr><td>QASYMMS8
1078 <tr><td>QASYMMU8
1079 <tr><td>QSYMMS16
1080 <tr><td>SIGNED32
1081 </table>
1082<tr>
1083 <td>CpuAcc
1084 <td>
1085 <ul>
1086 <li>All
1087 </ul>
1088 <td>
1089 <table>
1090 <tr><th>
1091 <tr><td>FLOAT16
1092 <tr><td>FLOAT32
1093 </table>
1094<tr>
1095 <td>GpuAcc
1096 <td>
1097 <ul>
1098 <li>All
1099 </ul>
1100 <td>
1101 <table>
1102 <tr><th>
1103 <tr><td>FLOAT16
1104 <tr><td>FLOAT32
1105 </table>
1106<tr>
1107 <td rowspan="3">ElementwiseBaseLayer
1108 <td rowspan="3" style="width:200px;"> Layer to perform Add - Div - Max - Min - Mul operations.
1109 <td rowspan="3">
1110 <ul>
1111 <li>ANEURALNETWORKS_ADD
1112 <li>ANEURALNETWORKS_DIV
1113 <li>ANEURALNETWORKS_MAXIMUM
1114 <li>ANEURALNETWORKS_MINIMUM
1115 <li>ANEURALNETWORKS_MUL
1116 </ul>
1117 <td>CpuRef
1118 <td>
1119 <ul>
1120 <li>All
1121 </ul>
1122 <td>
1123 <table>
1124 <tr><th>
1125 <tr><td>BFLOAT16
1126 <tr><td>FLOAT16
1127 <tr><td>FLOAT32
1128 <tr><td>QASYMMS8
1129 <tr><td>QASYMMU8
1130 <tr><td>QSYMMS16
1131 <tr><td>SIGNED32
1132 </table>
1133<tr>
1134 <td>CpuAcc
1135 <td>
1136 <ul>
1137 <li>All
1138 </ul>
1139 <td>
1140 <table>
1141 <tr><th>
1142 <tr><td>QASYMMU8
1143 <tr><td>QASYMMS8
1144 <tr><td>QSYMMS16
1145 <tr><td>SIGNED32
1146 <tr><td>FLOAT16
1147 <tr><td>FLOAT32
1148 </table>
1149<tr>
1150 <td>GpuAcc
1151 <td>
1152 <ul>
1153 <li>All
1154 </ul>
1155 <td>
1156 <table>
1157 <tr><th>
1158 <tr><td>QASYMMU8
1159 <tr><td>QASYMMS8
1160 <tr><td>QSYMMS16
1161 <tr><td>SIGNED32
1162 <tr><td>FLOAT16
1163 <tr><td>FLOAT32
1164 </table>
1165<tr>
John Mcloughlin0ec00872023-05-15 17:03:49 +01001166 <td rowspan="3">ElementwiseBinaryLayer
1167 <td rowspan="3" style="width:200px;"> Layer to perform Power and Square Difference operations.
1168 <td rowspan="3">
1169 <ul>
1170 <li>ANEURALNETWORKS_POW
1171 </ul>
1172 <td>CpuRef
1173 <td>
1174 <ul>
1175 <li>All
1176 </ul>
1177 <td>
1178 <table>
1179 <tr><th>
1180 <tr><td>FLOAT16
1181 <tr><td>FLOAT32
1182 <tr><td>QASYMMS8
1183 <tr><td>QASYMMU8
1184 <tr><td>QSYMMS16
1185 <tr><td>SIGNED32
1186 </table>
1187<tr>
1188 <td>CpuAcc
1189 <td>
1190 <ul>
1191 <li>All
1192 </ul>
1193 <td>
1194 <table>
1195 <tr><th>
1196 <tr><td>FLOAT16
1197 <tr><td>FLOAT32
1198 </table>
1199<tr>
1200 <td>GpuAcc
1201 <td>
1202 <ul>
1203 <li>All
1204 </ul>
1205 <td>
1206 <table>
1207 <tr><th>
1208 <tr><td>FLOAT16
1209 <tr><td>FLOAT32
1210 </table>
1211<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01001212 <td rowspan="3">ElementwiseUnaryLayer
Nikhil Raj930e1a22023-06-08 09:49:46 +01001213 <td rowspan="3" style="width:200px;"> Layer to perform Rsqrt - Exp - Neg - Log - Abs - Sin - Sqrt - Ceil operations.
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01001214 <td rowspan="3">
1215 <ul>
1216 <li>ANEURALNETWORKS_ABS
1217 <li>ANEURALNETWORKS_EXP
1218 <li>ANEURALNETWORKS_LOG
1219 <li>ANEURALNETWORKS_NEG
1220 <li>ANEURALNETWORKS_RSQRT
1221 <li>ANEURALNETWORKS_SIN
1222 <li>ANEURALNETWORKS_SQRT
1223 </ul>
1224 <td>CpuRef
1225 <td>
1226 <ul>
1227 <li>All
1228 </ul>
1229 <td>
1230 <table>
1231 <tr><th>
1232 <tr><td>BFLOAT16
1233 <tr><td>FLOAT16
1234 <tr><td>FLOAT32
1235 <tr><td>QASYMMS8
1236 <tr><td>QASYMMU8
1237 <tr><td>QSYMMS16
1238 </table>
1239<tr>
1240 <td>CpuAcc
1241 <td>
1242 <ul>
1243 <li>All
1244 </ul>
1245 <td>
1246 <table>
1247 <tr><th>
1248 <tr><td>FLOAT16
1249 <tr><td>FLOAT32
1250 <tr><td>SIGNED32
1251 </table>
1252<tr>
1253 <td>GpuAcc
1254 <td>
1255 <ul>
1256 <li>All
1257 </ul>
1258 <td>
1259 <table>
1260 <tr><th>
1261 <tr><td>FLOAT16
1262 <tr><td>FLOAT32
1263 </table>
1264<tr>
1265 <td rowspan="1">FakeQuantizationLayer
1266 <td rowspan="1" style="width:200px;"> Layer to quantize float values and dequantize afterwards. The current implementation does not dequantize the values.
1267 <td rowspan="1">
1268 <ul>
1269 <li>N/A
1270 </ul>
1271 <td>CpuRef
1272 <td>
1273 <ul>
1274 <li>All
1275 </ul>
1276 <td>
1277 <table>
1278 <tr><th>
1279 <tr><td>FLOAT32
1280 </table>
1281<tr>
1282 <td rowspan="3">FillLayer
1283 <td rowspan="3" style="width:200px;"> Layer to set the values of a tensor with a given value.
1284 <td rowspan="3">
1285 <ul>
1286 <li>ANEURALNETWORKS_FILL
1287 </ul>
1288 <td>CpuRef
1289 <td>
1290 <ul>
1291 <li>All
1292 </ul>
1293 <td>
1294 <table>
1295 <tr><th>
1296 <tr><td>FLOAT16
1297 <tr><td>FLOAT32
1298 <tr><td>SIGNED32
1299 </table>
1300<tr>
1301 <td>CpuAcc
1302 <td>
1303 <ul>
1304 <li>All
1305 </ul>
1306 <td>
1307 <table>
1308 <tr><th>
1309 <tr><td>All
1310 </table>
1311<tr>
1312 <td>GpuAcc
1313 <td>
1314 <ul>
1315 <li>All
1316 </ul>
1317 <td>
1318 <table>
1319 <tr><th>
1320 <tr><td>All
1321 </table>
1322<tr>
1323 <td rowspan="3">FloorLayer
1324 <td rowspan="3" style="width:200px;"> Layer to round the value to the lowest whole number.
1325 <td rowspan="3">
1326 <ul>
1327 <li>ANEURALNETWORKS_FLOOR
1328 </ul>
1329 <td>CpuRef
1330 <td>
1331 <ul>
1332 <li>All
1333 </ul>
1334 <td>
1335 <table>
1336 <tr><th>
1337 <tr><td>BFLOAT16
1338 <tr><td>FLOAT16
1339 <tr><td>FLOAT32
1340 </table>
1341<tr>
1342 <td>CpuAcc
1343 <td>
1344 <ul>
1345 <li>All
1346 </ul>
1347 <td>
1348 <table>
1349 <tr><th>
1350 <tr><td>FLOAT32
1351 <tr><td>FLOAT16
1352 </table>
1353<tr>
1354 <td>GpuAcc
1355 <td>
1356 <ul>
1357 <li>All
1358 </ul>
1359 <td>
1360 <table>
1361 <tr><th>
1362 <tr><td>FLOAT32
1363 <tr><td>FLOAT16
1364 </table>
1365<tr>
1366 <td rowspan="3">FullyConnectedLayer
1367 <td rowspan="3" style="width:200px;"> Layer to perform a fully connected / dense operation.
1368 <td rowspan="3">
1369 <ul>
1370 <li>ANEURALNETWORKS_FULLY_CONNECTED
1371 </ul>
1372 <td>CpuRef
1373 <td>
1374 <ul>
1375 <li>All
1376 </ul>
1377 <td>
1378 <table>
1379 <tr><th>
1380 <tr><td>BFLOAT16
1381 <tr><td>FLOAT16
1382 <tr><td>FLOAT32
1383 <tr><td>QASYMMS8
1384 <tr><td>QASYMMU8
1385 <tr><td>QSYMMS16
1386 </table>
1387<tr>
1388 <td>CpuAcc
1389 <td>
1390 <ul>
1391 <li>NHWC
1392 <li>NCHW
1393 </ul>
1394 <td>
1395 <table>
1396 <tr><th>
1397 <tr><td>SIGNED32
1398 <tr><td>FLOAT16
1399 <tr><td>FLOAT32
1400 <tr><td>QASYMMU8
1401 <tr><td>QASYMMS8
1402 </table>
1403<tr>
1404 <td>GpuAcc
1405 <td>
1406 <ul>
1407 <li>NHWC
1408 <li>NCHW
1409 </ul>
1410 <td>
1411 <table>
1412 <tr><th>
1413 <tr><td>SIGNED32
1414 <tr><td>FLOAT16
1415 <tr><td>FLOAT32
1416 <tr><td>QASYMMU8
1417 <tr><td>QASYMMS8
1418 </table>
1419<tr>
1420 <td rowspan="3">GatherLayer
1421 <td rowspan="3" style="width:200px;"> Layer to perform the gather operation along the chosen axis.
1422 <td rowspan="3">
1423 <ul>
1424 <li>ANEURALNETWORKS_GATHER
1425 </ul>
1426 <td>CpuRef
1427 <td>
1428 <ul>
1429 <li>All
1430 </ul>
1431 <td>
1432 <table>
1433 <tr><th>
1434 <tr><td>BFLOAT16
1435 <tr><td>FLOAT16
1436 <tr><td>FLOAT32
1437 <tr><td>QASYMMS8
1438 <tr><td>QASYMMU8
1439 <tr><td>QSYMMS16
1440 <tr><td>SIGNED32
1441 </table>
1442<tr>
1443 <td>CpuAcc
1444 <td>
1445 <ul>
1446 <li>All
1447 </ul>
1448 <td>
1449 <table>
1450 <tr><th>
1451 <tr><td>All
1452 </table>
1453<tr>
1454 <td>GpuAcc
1455 <td>
1456 <ul>
1457 <li>All
1458 </ul>
1459 <td>
1460 <table>
1461 <tr><th>
1462 <tr><td>All
1463 </table>
1464<tr>
Teresa Charlinb2d3ec52022-04-12 22:07:09 +01001465 <td rowspan="3">GatherNdLayer
1466 <td rowspan="3" style="width:200px;"> Layer to perform the gatherNd operation.
1467 <td rowspan="3">
1468 <ul>
1469 <li>N/A
1470 </ul>
1471 <td>CpuRef
1472 <td>
1473 <ul>
1474 <li>All
1475 </ul>
1476 <td>
1477 <table>
1478 <tr><th>
1479 <tr><td>BFLOAT16
1480 <tr><td>FLOAT16
1481 <tr><td>FLOAT32
1482 <tr><td>QASYMMS8
1483 <tr><td>QASYMMU8
1484 <tr><td>QSYMMS16
1485 <tr><td>SIGNED32
1486 </table>
1487<tr>
1488 <td>CpuAcc
1489 <td>
1490 <ul>
Teresa Charlinbd22c7d2022-04-26 18:14:12 +01001491 <li>All
Teresa Charlinb2d3ec52022-04-12 22:07:09 +01001492 </ul>
1493 <td>
Teresa Charlinbd22c7d2022-04-26 18:14:12 +01001494 <table>
1495 <tr><th>
1496 <tr><td>BFLOAT16
1497 <tr><td>FLOAT16
1498 <tr><td>FLOAT32
1499 <tr><td>QASYMMS8
1500 <tr><td>QASYMMU8
1501 <tr><td>QSYMMS16
1502 <tr><td>SIGNED32
1503 </table>
Teresa Charlinb2d3ec52022-04-12 22:07:09 +01001504<tr>
1505 <td>GpuAcc
1506 <td>
1507 <ul>
Teresa Charlin989e2f62022-04-27 16:26:11 +01001508 <li>All
Teresa Charlinb2d3ec52022-04-12 22:07:09 +01001509 </ul>
1510 <td>
Teresa Charlin989e2f62022-04-27 16:26:11 +01001511 <table>
1512 <tr><th>
1513 <tr><td>BFLOAT16
1514 <tr><td>FLOAT16
1515 <tr><td>FLOAT32
1516 <tr><td>QASYMMS8
1517 <tr><td>QASYMMU8
1518 <tr><td>QSYMMS16
1519 <tr><td>SIGNED32
1520 </table>
Teresa Charlinb2d3ec52022-04-12 22:07:09 +01001521<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01001522 <td rowspan="1">InputLayer
1523 <td rowspan="1" style="width:200px;"> Special layer used to provide input data to the computational network.
1524 <td rowspan="1">
1525 <ul>
1526 <li>N/A
1527 </ul>
1528 <td>All
1529 <td>
1530 <ul>
1531 <li>All
1532 </ul>
1533 <td>
1534 <table>
1535 <tr><th>
1536 <tr><td>All
1537 </table>
1538<tr>
1539 <td rowspan="3">InstanceNormalizationLayer
1540 <td rowspan="3" style="width:200px;"> Layer to perform an instance normalization on a given axis.
1541 <td rowspan="3">
1542 <ul>
1543 <li>ANEURALNETWORKS_INSTANCE_NORMALIZATION
1544 </ul>
1545 <td>CpuRef
1546 <td>
1547 <ul>
1548 <li>All
1549 </ul>
1550 <td>
1551 <table>
1552 <tr><th>
1553 <tr><td>BFLOAT16
1554 <tr><td>FLOAT16
1555 <tr><td>FLOAT32
1556 </table>
1557<tr>
1558 <td>CpuAcc
1559 <td>
1560 <ul>
1561 <li>NHWC
1562 <li>NCHW
1563 </ul>
1564 <td>
1565 <table>
1566 <tr><th>
1567 <tr><td>FLOAT16
1568 <tr><td>FLOAT32
1569 </table>
1570<tr>
1571 <td>GpuAcc
1572 <td>
1573 <ul>
1574 <li>NHWC
1575 <li>NCHW
1576 </ul>
1577 <td>
1578 <table>
1579 <tr><th>
1580 <tr><td>FLOAT16
1581 <tr><td>FLOAT32
1582 </table>
1583<tr>
1584 <td rowspan="3">L2NormalizationLayer
1585 <td rowspan="3" style="width:200px;"> Layer to perform an L2 normalization on a given axis.
1586 <td rowspan="3">
1587 <ul>
1588 <li>ANEURALNETWORKS_L2_NORMALIZATION
1589 </ul>
1590 <td>CpuRef
1591 <td>
1592 <ul>
1593 <li>All
1594 </ul>
1595 <td>
1596 <table>
1597 <tr><th>
1598 <tr><td>BFLOAT16
1599 <tr><td>FLOAT16
1600 <tr><td>FLOAT32
1601 <tr><td>QASYMMS8
1602 <tr><td>QASYMMU8
1603 <tr><td>QSYMMS16
1604 </table>
1605<tr>
1606 <td>CpuAcc
1607 <td>
1608 <ul>
1609 <li>NHWC
1610 <li>NCHW
1611 </ul>
1612 <td>
1613 <table>
1614 <tr><th>
1615 <tr><td>FLOAT16
1616 <tr><td>FLOAT32
1617 </table>
1618<tr>
1619 <td>GpuAcc
1620 <td>
1621 <ul>
1622 <li>NHWC
1623 <li>NCHW
1624 </ul>
1625 <td>
1626 <table>
1627 <tr><th>
1628 <tr><td>FLOAT16
1629 <tr><td>FLOAT32
1630 </table>
1631<tr>
1632 <td rowspan="3">LogSoftmaxLayer
1633 <td rowspan="3" style="width:200px;"> Layer to perform the log softmax activations given logits.
1634 <td rowspan="3">
1635 <ul>
1636 <li>N/A
1637 </ul>
1638 <td>CpuRef
1639 <td>
1640 <ul>
1641 <li>All
1642 </ul>
1643 <td>
1644 <table>
1645 <tr><th>
1646 <tr><td>BFLOAT16
1647 <tr><td>FLOAT16
1648 <tr><td>FLOAT32
1649 </table>
1650<tr>
1651 <td>CpuAcc
1652 <td>
1653 <ul>
1654 <li>All
1655 </ul>
1656 <td>
1657 <table>
1658 <tr><th>
1659 <tr><td>QASYMMU8
1660 <tr><td>QASYMMS8
1661 <tr><td>FLOAT16
1662 <tr><td>FLOAT32
1663 </table>
1664<tr>
1665 <td>GpuAcc
1666 <td>
1667 <ul>
1668 <li>All
1669 </ul>
1670 <td>
1671 <table>
1672 <tr><th>
1673 <tr><td>QASYMMU8
1674 <tr><td>QASYMMS8
1675 <tr><td>FLOAT16
1676 <tr><td>FLOAT32
1677 </table>
1678<tr>
1679 <td rowspan="3">LogicalBinaryLayer
1680 <td rowspan="3" style="width:200px;"> Layer to perform Logical AND - Logical NOT - Logical OR operations.
1681 <td rowspan="3">
1682 <ul>
1683 <li>ANEURALNETWORKS_LOGICAL_AND
1684 <li>ANEURALNETWORKS_LOGICAL_NOT
1685 <li>ANEURALNETWORKS_LOGICAL_OR
1686 </ul>
1687 <td>CpuRef
1688 <td>
1689 <ul>
1690 <li>All
1691 </ul>
1692 <td>
1693 <table>
1694 <tr><th>
1695 <tr><td>BOOLEAN
1696 </table>
1697<tr>
1698 <td>CpuAcc
1699 <td>
1700 <ul>
1701 <li>All
1702 </ul>
1703 <td>
1704 <table>
1705 <tr><th>
1706 <tr><td>BOOLEAN
1707 </table>
1708<tr>
1709 <td>GpuAcc
1710 <td>
1711 <ul>
1712 <li>All
1713 </ul>
1714 <td>
1715 <table>
1716 <tr><th>
1717 <tr><td>BOOLEAN
1718 </table>
1719<tr>
1720 <td rowspan="3">LstmLayer
1721 <td rowspan="3" style="width:200px;"> Layer to perform a single time step in a Long Short-Term Memory (LSTM) operation.
1722 <td rowspan="3">
1723 <ul>
1724 <li>ANEURALNETWORKS_LSTM
1725 </ul>
1726 <td>CpuRef
1727 <td>
1728 <ul>
1729 <li>All
1730 </ul>
1731 <td>
1732 <table>
1733 <tr><th>
1734 <tr><td>BFLOAT16
1735 <tr><td>FLOAT16
1736 <tr><td>QSYMMS16
1737 </table>
1738<tr>
1739 <td>CpuAcc
1740 <td>
1741 <ul>
1742 <li>All
1743 </ul>
1744 <td>
1745 <table>
1746 <tr><th>
1747 <tr><td>FLOAT16
1748 <tr><td>FLOAT32
1749 </table>
1750<tr>
1751 <td>GpuAcc
1752 <td>
1753 <ul>
1754 <li>All
1755 </ul>
1756 <td>
1757 <table>
1758 <tr><th>
1759 <tr><td>FLOAT16
1760 <tr><td>FLOAT32
1761 </table>
1762<tr>
1763 <td rowspan="3">MapLayer
1764 <td rowspan="3" style="width:200px;"> Layer to perform map operation on tensor.
1765 <td rowspan="3">
1766 <ul>
1767 <li>N/A
1768 </ul>
1769 <td>CpuRef
1770 <td>
1771 <ul>
1772 <li>All
1773 </ul>
1774 <td>
1775 <table>
1776 <tr><th>
1777 <tr><td>All
1778 </table>
1779<tr>
1780 <td>CpuAcc
1781 <td>
1782 <ul>
1783 <li>All
1784 </ul>
1785 <td>
1786 <table>
1787 <tr><th>
1788 <tr><td>All
1789 </table>
1790<tr>
1791 <td>GpuAcc
1792 <td>
1793 <ul>
1794 <li>All
1795 </ul>
1796 <td>
1797 <table>
1798 <tr><th>
1799 <tr><td>All
1800 </table>
1801<tr>
1802 <td rowspan="3">MaximumLayer
1803 <td rowspan="3" style="width:200px;"> Layer to perform an elementwise maximum of two tensors.
1804 <td rowspan="3">
1805 <ul>
1806 <li>N/A
1807 </ul>
1808 <td>CpuRef
1809 <td>
1810 <ul>
1811 <li>All
1812 </ul>
1813 <td>
1814 <table>
1815 <tr><th>
1816 <tr><td>BFLOAT16
1817 <tr><td>FLOAT16
1818 <tr><td>FLOAT32
1819 <tr><td>QASYMMS8
1820 <tr><td>QASYMMU8
1821 <tr><td>QSYMMS16
1822 <tr><td>SIGNED32
1823 </table>
1824<tr>
1825 <td>CpuAcc
1826 <td>
1827 <ul>
1828 <li>All
1829 </ul>
1830 <td>
1831 <table>
1832 <tr><th>
1833 <tr><td>QASYMMU8
1834 <tr><td>QASYMMS8
1835 <tr><td>FLOAT16
1836 <tr><td>FLOAT32
1837 <tr><td>SIGNED32
1838 </table>
1839<tr>
1840 <td>GpuAcc
1841 <td>
1842 <ul>
1843 <li>All
1844 </ul>
1845 <td>
1846 <table>
1847 <tr><th>
1848 <tr><td>QASYMMU8
1849 <tr><td>QASYMMS8
1850 <tr><td>QSYMMS16
1851 <tr><td>FLOAT16
1852 <tr><td>FLOAT32
1853 <tr><td>SIGNED32
1854 </table>
1855<tr>
1856 <td rowspan="3">MeanLayer
1857 <td rowspan="3" style="width:200px;"> Layer to perform reduce mean operation.
1858 <td rowspan="3">
1859 <ul>
1860 <li>ANEURALNETWORKS_MEAN
1861 </ul>
1862 <td>CpuRef
1863 <td>
1864 <ul>
1865 <li>All
1866 </ul>
1867 <td>
1868 <table>
1869 <tr><th>
1870 <tr><td>BFLOAT16
1871 <tr><td>FLOAT16
1872 <tr><td>FLOAT32
1873 <tr><td>QASYMMS8
1874 <tr><td>QASYMMU8
1875 <tr><td>QSYMMS16
1876 </table>
1877<tr>
1878 <td>CpuAcc
1879 <td>
1880 <ul>
1881 <li>All
1882 </ul>
1883 <td>
1884 <table>
1885 <tr><th>
1886 <tr><td>QASYMMU8
1887 <tr><td>QASYMMS8
1888 <tr><td>FLOAT16
1889 <tr><td>FLOAT32
1890 </table>
1891<tr>
1892 <td>GpuAcc
1893 <td>
1894 <ul>
1895 <li>All
1896 </ul>
1897 <td>
1898 <table>
1899 <tr><th>
1900 <tr><td>QASYMMU8
1901 <tr><td>QASYMMS8
1902 <tr><td>FLOAT16
1903 <tr><td>FLOAT32
1904 </table>
1905<tr>
1906 <td rowspan="3">MemCopyLayer
1907 <td rowspan="3" style="width:200px;"> Layer to perform memory copy operation.
1908 <td rowspan="3">
1909 <ul>
1910 <li>N/A
1911 </ul>
1912 <td>CpuRef
1913 <td>
1914 <ul>
1915 <li>All
1916 </ul>
1917 <td>
1918 <table>
1919 <tr><th>
1920 <tr><td>BFLOAT16
1921 <tr><td>FLOAT16
1922 <tr><td>FLOAT32
1923 <tr><td>QASYMMS8
1924 <tr><td>QASYMMU8
1925 <tr><td>QSYMMS16
1926 <tr><td>BOOLEAN
1927 </table>
1928<tr>
1929 <td>CpuAcc
1930 <td>
1931 <ul>
1932 <li>All
1933 </ul>
1934 <td>
1935 <table>
1936 <tr><th>
1937 <tr><td>All
1938 </table>
1939<tr>
1940 <td>GpuAcc
1941 <td>
1942 <ul>
1943 <li>All
1944 </ul>
1945 <td>
1946 <table>
1947 <tr><th>
1948 <tr><td>All
1949 </table>
1950<tr>
1951 <td rowspan="3">MemImportLayer
1952 <td rowspan="3" style="width:200px;"> Layer to perform memory import operation.
1953 <td rowspan="3">
1954 <ul>
1955 <li>N/A
1956 </ul>
1957 <td>CpuRef
1958 <td>
1959 <ul>
1960 <li>All
1961 </ul>
1962 <td>
1963 <table>
1964 <tr><th>
1965 <tr><td>All
1966 </table>
1967<tr>
1968 <td>CpuAcc
1969 <td>
1970 <ul>
1971 <li>All
1972 </ul>
1973 <td>
1974 <table>
1975 <tr><th>
1976 <tr><td>All
1977 </table>
1978<tr>
1979 <td>GpuAcc
1980 <td>
1981 <ul>
1982 <li>All
1983 </ul>
1984 <td>
1985 <table>
1986 <tr><th>
1987 <tr><td>All
1988 </table>
1989<tr>
1990 <td rowspan="3">MergeLayer
1991 <td rowspan="3" style="width:200px;"> Layer to concatenate tensors along a given axis.
1992 <td rowspan="3">
1993 <ul>
1994 <li>ANEURALNETWORKS_CONCATENATION
1995 </ul>
1996 <td>CpuRef
1997 <td>
1998 <ul>
1999 <li>All
2000 </ul>
2001 <td>
2002 <table>
2003 <tr><th>
2004 <tr><td>BFLOAT16
2005 <tr><td>FLOAT16
2006 <tr><td>FLOAT32
2007 <tr><td>QASYMMS8
2008 <tr><td>QASYMMU8
2009 <tr><td>QSYMMS16
2010 </table>
2011<tr>
2012 <td>CpuAcc
2013 <td>
2014 <ul>
2015 <li>All
2016 </ul>
2017 <td>
2018 <table>
2019 <tr><th>
2020 <tr><td>QASYMMU8
2021 <tr><td>QASYMMS8
2022 <tr><td>FLOAT16
2023 <tr><td>FLOAT32
2024 </table>
2025<tr>
2026 <td>GpuAcc
2027 <td>
2028 <ul>
2029 <li>All
2030 </ul>
2031 <td>
2032 <table>
2033 <tr><th>
2034 <tr><td>QASYMMU8
2035 <tr><td>QASYMMS8
2036 <tr><td>FLOAT16
2037 <tr><td>FLOAT32
2038 </table>
2039<tr>
2040 <td rowspan="3">MinimumLayer
2041 <td rowspan="3" style="width:200px;"> Layer to perform an elementwise minimum of two tensors.
2042 <td rowspan="3">
2043 <ul>
2044 <li>ANEURALNETWORKS_MINIMUM
2045 </ul>
2046 <td>CpuRef
2047 <td>
2048 <ul>
2049 <li>All
2050 </ul>
2051 <td>
2052 <table>
2053 <tr><th>
2054 <tr><td>BFLOAT16
2055 <tr><td>FLOAT16
2056 <tr><td>FLOAT32
2057 <tr><td>QASYMMS8
2058 <tr><td>QASYMMU8
2059 <tr><td>QSYMMS16
2060 <tr><td>SIGNED32
2061 </table>
2062<tr>
2063 <td>CpuAcc
2064 <td>
2065 <ul>
2066 <li>All
2067 </ul>
2068 <td>
2069 <table>
2070 <tr><th>
2071 <tr><td>QASYMMU8
2072 <tr><td>QASYMMS8
2073 <tr><td>QSYMMS16
2074 <tr><td>FLOAT16
2075 <tr><td>FLOAT32
2076 </table>
2077<tr>
2078 <td>GpuAcc
2079 <td>
2080 <ul>
2081 <li>All
2082 </ul>
2083 <td>
2084 <table>
2085 <tr><th>
2086 <tr><td>QASYMMU8
2087 <tr><td>QASYMMS8
2088 <tr><td>QSYMMS16
2089 <tr><td>FLOAT16
2090 <tr><td>FLOAT32
2091 <tr><td>SIGNED32
2092 </table>
2093<tr>
2094 <td rowspan="3">MultiplicationLayer
2095 <td rowspan="3" style="width:200px;"> Layer to perform an elementwise multiplication of two tensors.
2096 <td rowspan="3">
2097 <ul>
2098 <li>ANEURALNETWORKS_MUL
2099 </ul>
2100 <td>CpuRef
2101 <td>
2102 <ul>
2103 <li>All
2104 </ul>
2105 <td>
2106 <table>
2107 <tr><th>
2108 <tr><td>BFLOAT16
2109 <tr><td>FLOAT16
2110 <tr><td>FLOAT32
2111 <tr><td>QASYMMS8
2112 <tr><td>QASYMMU8
2113 <tr><td>QSYMMS16
2114 <tr><td>SIGNED32
2115 </table>
2116<tr>
2117 <td>CpuAcc
2118 <td>
2119 <ul>
2120 <li>All
2121 </ul>
2122 <td>
2123 <table>
2124 <tr><th>
2125 <tr><td>QASYMMU8
2126 <tr><td>QASYMMS8
2127 <tr><td>QSYMMS16
2128 <tr><td>SIGNED32
2129 <tr><td>FLOAT16
2130 <tr><td>FLOAT32
2131 </table>
2132<tr>
2133 <td>GpuAcc
2134 <td>
2135 <ul>
2136 <li>All
2137 </ul>
2138 <td>
2139 <table>
2140 <tr><th>
2141 <tr><td>QASYMMU8
2142 <tr><td>QASYMMS8
2143 <tr><td>QSYMMS16
2144 <tr><td>SIGNED32
2145 <tr><td>FLOAT16
2146 <tr><td>FLOAT32
2147 <tr><td>SIGNED32
2148 </table>
2149<tr>
2150 <td rowspan="3">NormalizationLayer
2151 <td rowspan="3" style="width:200px;"> Layer to compute normalization operation.
2152 <td rowspan="3">
2153 <ul>
2154 <li>ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION
2155 </ul>
2156 <td>CpuRef
2157 <td>
2158 <ul>
2159 <li>All
2160 </ul>
2161 <td>
2162 <table>
2163 <tr><th>
2164 <tr><td>BFLOAT16
2165 <tr><td>FLOAT16
2166 <tr><td>FLOAT32
2167 <tr><td>QASYMMS8
2168 <tr><td>QASYMMU8
2169 <tr><td>QSYMMS16
2170 </table>
2171<tr>
2172 <td>CpuAcc
2173 <td>
2174 <ul>
2175 <li>NHWC
2176 <li>NCHW
2177 </ul>
2178 <td>
2179 <table>
2180 <tr><th>
2181 <tr><td>FLOAT32
2182 <tr><td>FLOAT16
2183 </table>
2184<tr>
2185 <td>GpuAcc
2186 <td>
2187 <ul>
2188 <li>NHWC
2189 <li>NCHW
2190 </ul>
2191 <td>
2192 <table>
2193 <tr><th>
2194 <tr><td>FLOAT32
2195 <tr><td>FLOAT16
2196 </table>
2197<tr>
2198 <td rowspan="1">OutputLayer
2199 <td rowspan="1" style="width:200px;"> A special layer providing access to a user supplied buffer into which the output of a network can be written.
2200 <td rowspan="1">
2201 <ul>
2202 <li>N/A
2203 </ul>
2204 <td>All
2205 <td>
2206 <ul>
2207 <li>All
2208 </ul>
2209 <td>
2210 <table>
2211 <tr><th>
2212 <tr><td>All
2213 </table>
2214<tr>
2215 <td rowspan="3">PadLayer
2216 <td rowspan="3" style="width:200px;"> Layer to pad a tensor.
2217 <td rowspan="3">
2218 <ul>
2219 <li>ANEURALNETWORKS_PAD
2220 <li>ANEURALNETWORKS_PAD_V2
2221 </ul>
2222 <td>CpuRef
2223 <td>
2224 <ul>
2225 <li>All
2226 </ul>
2227 <td>
2228 <table>
2229 <tr><th>
2230 <tr><td>BFLOAT16
2231 <tr><td>FLOAT16
2232 <tr><td>FLOAT32
2233 <tr><td>QASYMMS8
2234 <tr><td>QASYMMU8
2235 <tr><td>QSYMMS16
2236 </table>
2237<tr>
2238 <td>CpuAcc
2239 <td>
2240 <ul>
2241 <li>NHWC
2242 <li>NCHW
2243 </ul>
2244 <td>
2245 <table>
2246 <tr><th>
2247 <tr><td>All
2248 </table>
2249<tr>
2250 <td>GpuAcc
2251 <td>
2252 <ul>
2253 <li>NHWC
2254 <li>NCHW
2255 </ul>
2256 <td>
2257 <table>
2258 <tr><th>
2259 <tr><td>All
2260 </table>
2261<tr>
2262 <td rowspan="3">PermuteLayer
2263 <td rowspan="3" style="width:200px;"> Layer to transpose an ND tensor.
2264 <td rowspan="3">
2265 <ul>
2266 <li>ANEURALNETWORKS_TRANSPOSE
2267 </ul>
2268 <td>CpuRef
2269 <td>
2270 <ul>
2271 <li>All
2272 </ul>
2273 <td>
2274 <table>
2275 <tr><th>
2276 <tr><td>BFLOAT16
2277 <tr><td>FLOAT16
2278 <tr><td>FLOAT32
2279 <tr><td>QASYMMS8
2280 <tr><td>QASYMMU8
2281 <tr><td>QSYMMS16
2282 </table>
2283<tr>
2284 <td>CpuAcc
2285 <td>
2286 <ul>
2287 <li>NHWC
2288 <li>NCHW
2289 </ul>
2290 <td>
2291 <table>
2292 <tr><th>
2293 <tr><td>All
2294 </table>
2295<tr>
2296 <td>GpuAcc
2297 <td>
2298 <ul>
2299 <li>NHWC
2300 <li>NCHW
2301 </ul>
2302 <td>
2303 <table>
2304 <tr><th>
2305 <tr><td>All
2306 </table>
2307<tr>
2308 <td rowspan="3">Pooling2dLayer
Tamás Nyíri7b885b32021-10-26 14:47:57 +01002309 <td rowspan="3" style="width:200px;"> Layer to perform 2D pooling with the specified pooling operation.
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01002310 <td rowspan="3">
2311 <ul>
2312 <li>ANEURALNETWORKS_AVERAGE_POOL_2D
2313 <li>ANEURALNETWORKS_L2_POOL_2D
2314 <li>ANEURALNETWORKS_MAX_POOL_2D
2315 </ul>
2316 <td>CpuRef
2317 <td>
2318 <ul>
2319 <li>All
2320 </ul>
2321 <td>
2322 <table>
2323 <tr><th>
2324 <tr><td>BFLOAT16
2325 <tr><td>FLOAT16
2326 <tr><td>FLOAT32
2327 <tr><td>QASYMMS8
2328 <tr><td>QASYMMU8
2329 <tr><td>QSYMMS16
2330 </table>
2331<tr>
2332 <td>CpuAcc
2333 <td>
2334 <ul>
2335 <li>NHWC
2336 <li>NCHW
2337 </ul>
2338 <td>
2339 <table>
2340 <tr><th>
2341 <tr><td>QASYMMU8
2342 <tr><td>QASYMMS8
2343 <tr><td>FLOAT16
2344 <tr><td>FLOAT32
2345 </table>
2346<tr>
2347 <td>GpuAcc
2348 <td>
2349 <ul>
2350 <li>NHWC
2351 <li>NCHW
2352 </ul>
2353 <td>
2354 <table>
2355 <tr><th>
2356 <tr><td>QASYMMU8
2357 <tr><td>QASYMMS8
2358 <tr><td>FLOAT16
2359 <tr><td>FLOAT32
2360 </table>
2361<tr>
Tamás Nyíri7b885b32021-10-26 14:47:57 +01002362 <td rowspan="3">Pooling3dLayer
2363 <td rowspan="3" style="width:200px;"> Layer to perform 3D pooling with the specified pooling operation.
2364 <td rowspan="3">
2365 <ul>
2366 <li>ANEURALNETWORKS_AVERAGE_POOL_3D
2367 <li>ANEURALNETWORKS_L2_POOL_3D
2368 <li>ANEURALNETWORKS_MAX_POOL_3D
2369 </ul>
2370 <td>CpuRef
2371 <td>
2372 <ul>
2373 <li>NDHWC
2374 </ul>
2375 <td>
2376 <table>
2377 <tr><th>
2378 <tr><td>BFLOAT16
2379 <tr><td>FLOAT16
2380 <tr><td>FLOAT32
2381 <tr><td>QASYMMS8
2382 <tr><td>QASYMMU8
2383 <tr><td>QSYMMS16
2384 </table>
2385<tr>
2386 <td>CpuAcc
2387 <td>
2388 <ul>
2389 <li>NA
2390 </ul>
2391 <td>
2392<tr>
2393 <td>GpuAcc
2394 <td>
2395 <ul>
2396 <li>NDHWC
2397 </ul>
Nikhil Raj930e1a22023-06-08 09:49:46 +01002398 <td>
Tamás Nyíri7b885b32021-10-26 14:47:57 +01002399<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01002400 <td rowspan="1">PreCompiledLayer
2401 <td rowspan="1" style="width:200px;"> Opaque layer provided by a backend which provides an executable representation of a subgraph from the original network.
2402 <td rowspan="1">
2403 <ul>
2404 <li>N/A
2405 </ul>
2406 <td>N/A
2407 <td>N/A
2408 <td>N/A
2409<tr>
2410 <td rowspan="3">PreluLayer
2411 <td rowspan="3" style="width:200px;"> Layer to compute the activation layer with the PRELU activation function.
2412 <td rowspan="3">
2413 <ul>
2414 <li>ANEURALNETWORKS_PRELU
2415 </ul>
2416 <td>CpuRef
2417 <td>
2418 <ul>
2419 <li>All
2420 </ul>
2421 <td>
2422 <table>
2423 <tr><th>
2424 <tr><td>BFLOAT16
2425 <tr><td>FLOAT16
2426 <tr><td>FLOAT32
2427 <tr><td>QASYMMS8
2428 <tr><td>QASYMMU8
2429 <tr><td>QSYMMS16
2430 </table>
2431<tr>
2432 <td>CpuAcc
2433 <td>
2434 <ul>
2435 <li>All
2436 </ul>
2437 <td>
2438 <table>
2439 <tr><th>
2440 <tr><td>QASYMMU8
2441 <tr><td>QASYMMS8
2442 <tr><td>FLOAT16
2443 <tr><td>FLOAT32
2444 </table>
2445<tr>
2446 <td>GpuAcc
2447 <td>
2448 <ul>
2449 <li>All
2450 </ul>
2451 <td>
2452 <table>
2453 <tr><th>
2454 <tr><td>QASYMMU8
2455 <tr><td>QASYMMS8
2456 <tr><td>FLOAT16
2457 <tr><td>FLOAT32
2458 </table>
2459<tr>
2460 <td rowspan="3">QLstmLayer
2461 <td rowspan="3" style="width:200px;"> Layer to perform quantized LSTM (Long Short-Term Memory) operation.
2462 <td rowspan="3">
2463 <ul>
2464 <li>ANEURALNETWORKS_QUANTIZED_LSTM
2465 <li>ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
2466 </ul>
2467 <td>CpuRef
2468 <td>
2469 <ul>
2470 <li>All
2471 </ul>
2472 <td>
2473 <table>
2474 <tr><th>
2475 <tr><td>All
2476 </table>
2477<tr>
2478 <td>CpuAcc
2479 <td>
2480 <ul>
2481 <li>All
2482 </ul>
2483 <td>
2484 <table>
2485 <tr><th>
2486 <tr><td>QASYMMS8
2487 <tr><td>QASYMMU8
2488 <tr><td>SIGNED32
2489 <tr><td>QSYMMS16
2490 </table>
2491<tr>
2492 <td>GpuAcc
2493 <td>
2494 <ul>
2495 <li>All
2496 </ul>
2497 <td>
2498 <table>
2499 <tr><th>
2500 <tr><td>QASYMMS8
2501 <tr><td>QASYMMU8
2502 <tr><td>SIGNED32
2503 <tr><td>QSYMMS16
2504 </table>
2505<tr>
2506 <td rowspan="3">QuantizeLayer
2507 <td rowspan="3" style="width:200px;"> Layer to perform quantization operation.
2508 <td rowspan="3">
2509 <ul>
2510 <li>ANEURALNETWORKS_QUANTIZE
2511 </ul>
2512 <td>CpuRef
2513 <td>
2514 <ul>
2515 <li>All
2516 </ul>
2517 <td>
2518 <table>
2519 <tr><th>
2520 <tr><td>BFLOAT16
2521 <tr><td>FLOAT16
2522 <tr><td>FLOAT32
2523 <tr><td>QASYMMS8
2524 <tr><td>QASYMMU8
2525 <tr><td>QSYMMS8
2526 <tr><td>QSYMMS16
2527 </table>
2528<tr>
2529 <td>CpuAcc
2530 <td>
2531 <ul>
2532 <li>All
2533 </ul>
2534 <td>
2535 <table>
2536 <tr><th>
2537 <tr><td>QASYMMU8
2538 <tr><td>QASYMMS8
2539 <tr><td>QASYMM16
2540 <tr><td>FLOAT16
2541 <tr><td>FLOAT32
2542 </table>
2543<tr>
2544 <td>GpuAcc
2545 <td>
2546 <ul>
2547 <li>All
2548 </ul>
2549 <td>
2550 <table>
2551 <tr><th>
2552 <tr><td>QASYMMU8
2553 <tr><td>QASYMMS8
2554 <tr><td>QASYMM16
2555 <tr><td>FLOAT16
2556 <tr><td>FLOAT32
2557 </table>
2558<tr>
2559 <td rowspan="3">QuantizedLstmLayer
2560 <td rowspan="3" style="width:200px;"> Layer to perform quantized LSTM (Long Short-Term Memory) operation.
2561 <td rowspan="3">
2562 <ul>
2563 <li>ANEURALNETWORKS_QUANTIZED_LSTM
2564 <li>ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
2565 </ul>
2566 <td>CpuRef
2567 <td>
2568 <ul>
2569 <li>All
2570 </ul>
2571 <td>
2572 <table>
2573 <tr><th>
2574 <tr><td>All
2575 </table>
2576<tr>
2577 <td>CpuAcc
2578 <td>
2579 <ul>
2580 <li>All
2581 </ul>
2582 <td>
2583 <table>
2584 <tr><th>
2585 <tr><td>SIGNED32
2586 <tr><td>QASYMMU8
2587 <tr><td>QSYMMS16
2588 </table>
2589<tr>
2590 <td>GpuAcc
2591 <td>
2592 <ul>
2593 <li>All
2594 </ul>
2595 <td>
2596 <table>
2597 <tr><th>
2598 <tr><td>SIGNED32
2599 <tr><td>QASYMMU8
2600 <tr><td>QSYMMS16
2601 </table>
2602<tr>
2603 <td rowspan="3">RankLayer
2604 <td rowspan="3" style="width:200px;"> Layer to perform a rank operation.
2605 <td rowspan="3">
2606 <ul>
2607 <li>ANEURALNETWORKS_RANK
2608 </ul>
2609 <td>CpuRef
2610 <td>
2611 <ul>
2612 <li>All
2613 </ul>
2614 <td>
2615 <table>
2616 <tr><th>
2617 <tr><td>All
2618 </table>
2619<tr>
2620 <td>CpuAcc
2621 <td>
2622 <ul>
2623 <li>All
2624 </ul>
2625 <td>
2626 <table>
2627 <tr><th>
2628 <tr><td>All
2629 </table>
2630<tr>
2631 <td>GpuAcc
2632 <td>
2633 <ul>
2634 <li>All
2635 </ul>
2636 <td>
2637 <table>
2638 <tr><th>
2639 <tr><td>All
2640 </table>
2641<tr>
2642 <td rowspan="3">ReduceLayer
2643 <td rowspan="3" style="width:200px;"> Layer to perform reduce with the following operations - ARG_IDX_MAX: Index of the max value - ARG_IDX_MIN: Index of the min value - MEAN_SUM: Mean of sum - PROD: Product - SUM_SQUARE: Sum of squares - SUM: Sum - MIN: Min - MAX: Max
2644 <td rowspan="3">
2645 <ul>
2646 <li>ANEURALNETWORKS_REDUCE_MAX
2647 <li>ANEURALNETWORKS_REDUCE_MIN
2648 <li>ANEURALNETWORKS_REDUCE_SUM
Teresa Charlin32b78702021-09-03 11:25:54 +01002649 <li>ANEURALNETWORKS_REDUCE_PROD
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01002650 </ul>
2651 <td>CpuRef
2652 <td>
2653 <ul>
2654 <li>All
2655 </ul>
2656 <td>
2657 <table>
2658 <tr><th>
2659 <tr><td>BFLOAT16
2660 <tr><td>FLOAT16
2661 <tr><td>FLOAT32
2662 <tr><td>QASYMMS8
2663 <tr><td>QASYMMU8
2664 <tr><td>QSYMMS16
2665 <tr><td>SIGNED32
2666 </table>
2667<tr>
2668 <td>CpuAcc
2669 <td>
2670 <ul>
2671 <li>All
2672 </ul>
2673 <td>
2674 <table>
2675 <tr><th>
2676 <tr><td>QASYMMU8
2677 <tr><td>QASYMMS8
2678 <tr><td>FLOAT16
2679 <tr><td>FLOAT32
2680 <tr><td>SIGNED32
2681 </table>
2682<tr>
2683 <td>GpuAcc
2684 <td>
2685 <ul>
2686 <li>All
2687 </ul>
2688 <td>
2689 <table>
2690 <tr><th>
2691 <tr><td>QASYMMU8
2692 <tr><td>QASYMMS8
2693 <tr><td>FLOAT16
2694 <tr><td>FLOAT32
2695 <tr><td>SIGNED32
2696 </table>
2697<tr>
2698 <td rowspan="3">ReshapeLayer
2699 <td rowspan="3" style="width:200px;"> Layer to reshape a tensor.
2700 <td rowspan="3">
2701 <ul>
2702 <li>ANEURALNETWORKS_RESHAPE
2703 <li>ANEURALNETWORKS_SQUEEZE
2704 <li>ANEURALNETWORKS_EXPAND_DIMS
2705 </ul>
2706 <td>CpuRef
2707 <td>
2708 <ul>
2709 <li>All
2710 </ul>
2711 <td>
2712 <table>
2713 <tr><th>
2714 <tr><td>BFLOAT16
2715 <tr><td>FLOAT16
2716 <tr><td>FLOAT32
2717 <tr><td>QASYMMS8
2718 <tr><td>QASYMMU8
2719 <tr><td>QSYMMS16
2720 <tr><td>SIGNED32
2721 <tr><td>BOOLEAN
2722 </table>
2723<tr>
2724 <td>CpuAcc
2725 <td>
2726 <ul>
2727 <li>All
2728 </ul>
2729 <td>
2730 <table>
2731 <tr><th>
2732 <tr><td>All
2733 </table>
2734<tr>
2735 <td>GpuAcc
2736 <td>
2737 <ul>
2738 <li>All
2739 </ul>
2740 <td>
2741 <table>
2742 <tr><th>
2743 <tr><td>All
2744 </table>
2745<tr>
2746 <td rowspan="3">ResizeLayer
2747 <td rowspan="3" style="width:200px;"> Layer to perform resize of a tensor using one of the interpolation methods: - Bilinear - Nearest Neighbor.
2748 <td rowspan="3">
2749 <ul>
2750 <li>ANEURALNETWORKS_RESIZE_BILINEAR
2751 <li>ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR
2752 </ul>
2753 <td>CpuRef
2754 <td>
2755 <ul>
2756 <li>All
2757 </ul>
2758 <td>
2759 <table>
2760 <tr><th>
2761 <tr><td>BFLOAT16
2762 <tr><td>FLOAT16
2763 <tr><td>FLOAT32
2764 <tr><td>QASYMMS8
2765 <tr><td>QASYMMU8
2766 <tr><td>QSYMMS16
2767 </table>
2768<tr>
2769 <td>CpuAcc
2770 <td>
2771 <ul>
2772 <li>NHWC
2773 <li>NCHW
2774 </ul>
2775 <td>
2776 <table>
2777 <tr><th>
2778 <tr><td>QASYMMU8
2779 <tr><td>QASYMMS8
2780 <tr><td>FLOAT16
2781 <tr><td>FLOAT32
2782 </table>
2783<tr>
Tracy Narine944fb502023-07-04 15:08:57 +01002784 <td>GpuAcc
2785 <td>
2786 <ul>
2787 <li>NHWC
2788 <li>NCHW
2789 </ul>
2790 <td>
2791 <table>
2792 <tr><th>
2793 <tr><td>QASYMMU8
2794 <tr><td>QASYMMS8
2795 <tr><td>FLOAT16
2796 <tr><td>FLOAT32
2797 </table>
2798<tr>
Tianle Cheng988354d2023-06-28 13:20:47 +01002799 <td rowspan="3">ReverseV2Layer
Tracy Narine944fb502023-07-04 15:08:57 +01002800 <td rowspan="3" style="width:200px;"> Layer to perform reverse of a tensor.
Tianle Cheng988354d2023-06-28 13:20:47 +01002801 <td rowspan="3">
2802 <ul>
Tracy Narine944fb502023-07-04 15:08:57 +01002803 <li>NA
Tianle Cheng988354d2023-06-28 13:20:47 +01002804 </ul>
2805 <td>CpuRef
2806 <td>
2807 <ul>
2808 <li>All
2809 </ul>
2810 <td>
2811 <table>
2812 <tr><th>
2813 <tr><td>BFLOAT16
2814 <tr><td>FLOAT16
2815 <tr><td>FLOAT32
2816 <tr><td>QASYMMS8
2817 <tr><td>QASYMMU8
2818 <tr><td>QSYMMS16
2819 </table>
2820<tr>
2821 <td>CpuAcc
2822 <td>
2823 <ul>
Tracy Narine944fb502023-07-04 15:08:57 +01002824 <li>All
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01002825 </ul>
2826 <td>
2827 <table>
2828 <tr><th>
Tracy Narine944fb502023-07-04 15:08:57 +01002829 <tr><td>All
2830 </table>
2831<tr>
2832 <td>GpuAcc
2833 <td>
2834 <ul>
2835 <li>All
2836 </ul>
2837 <td>
2838 <table>
2839 <tr><th>
2840 <tr><td>All
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01002841 </table>
2842<tr>
2843 <td rowspan="3">RsqrtLayer
2844 <td rowspan="3" style="width:200px;"> Layer to perform Rsqrt operation.
2845 <td rowspan="3">
2846 <ul>
2847 <li>ANEURALNETWORKS_RSQRT
2848 </ul>
2849 <td>CpuRef
2850 <td>
2851 <ul>
2852 <li>All
2853 </ul>
2854 <td>
2855 <table>
2856 <tr><th>
2857 <tr><td>BFLOAT16
2858 <tr><td>FLOAT16
2859 <tr><td>FLOAT32
2860 <tr><td>QASYMMS8
2861 <tr><td>QASYMMU8
2862 <tr><td>QSYMMS16
2863 <tr><td>SIGNED32
2864 </table>
2865<tr>
2866 <td>CpuAcc
2867 <td>
2868 <ul>
2869 <li>All
2870 </ul>
2871 <td>
2872 <table>
2873 <tr><th>
2874 <tr><td>FLOAT16
2875 <tr><td>FLOAT32
2876 <tr><td>SIGNED32
2877 </table>
2878<tr>
2879 <td>GpuAcc
2880 <td>
2881 <ul>
2882 <li>All
2883 </ul>
2884 <td>
2885 <table>
2886 <tr><th>
2887 <tr><td>FLOAT16
2888 <tr><td>FLOAT32
2889 </table>
2890<tr>
2891 <td rowspan="3">ShapeLayer
2892 <td rowspan="3" style="width:200px;"> Layer to return the shape of the input tensor.
2893 <td rowspan="3">
2894 <ul>
2895 <li>N/A
2896 </ul>
2897 <td>CpuRef
2898 <td>
2899 <ul>
2900 <li>All
2901 </ul>
2902 <td>
2903 <table>
2904 <tr><th>
2905 <tr><td>All
2906 </table>
2907<tr>
2908 <td>CpuAcc
2909 <td>
2910 <ul>
2911 <li>All
2912 </ul>
2913 <td>
2914 <table>
2915 <tr><th>
2916 <tr><td>All
2917 </table>
2918<tr>
2919 <td>GpuAcc
2920 <td>
2921 <ul>
2922 <li>All
2923 </ul>
2924 <td>
2925 <table>
2926 <tr><th>
2927 <tr><td>All
2928 </table>
2929<tr>
2930 <td rowspan="3">SliceLayer
2931 <td rowspan="3" style="width:200px;"> Layer to perform tensor slicing.
2932 <td rowspan="3">
2933 <ul>
2934 <li>ANEURALNETWORKS_SLICE
2935 </ul>
2936 <td>CpuRef
2937 <td>
2938 <ul>
2939 <li>All
2940 </ul>
2941 <td>
2942 <table>
2943 <tr><th>
2944 <tr><td>BFLOAT16
2945 <tr><td>FLOAT32
2946 <tr><td>QASYMMS8
2947 <tr><td>QASYMMU8
2948 <tr><td>QSYMMS16
2949 </table>
2950<tr>
2951 <td>CpuAcc
2952 <td>
2953 <ul>
2954 <li>All
2955 </ul>
2956 <td>
2957 <table>
2958 <tr><th>
2959 <tr><td>All
2960 </table>
2961<tr>
2962 <td>GpuAcc
2963 <td>
2964 <ul>
2965 <li>All
2966 </ul>
2967 <td>
2968 <table>
2969 <tr><th>
2970 <tr><td>All
2971 </table>
2972<tr>
2973 <td rowspan="3">SoftmaxLayer
2974 <td rowspan="3" style="width:200px;"> Layer to perform softmax, log-softmax operation over the specified axis.
2975 <td rowspan="3">
2976 <ul>
2977 <li>ANEURALNETWORKS_LOG_SOFTMAX
2978 <li>ANEURALNETWORKS_SOFTMAX
2979 </ul>
2980 <td>CpuRef
2981 <td>
2982 <ul>
2983 <li>All
2984 </ul>
2985 <td>
2986 <table>
2987 <tr><th>
2988 <tr><td>BFLOAT16
2989 <tr><td>FLOAT16
2990 <tr><td>FLOAT32
2991 <tr><td>QASYMMS8
2992 <tr><td>QASYMMU8
2993 <tr><td>QSYMMS8
2994 <tr><td>QSYMMS16
2995 </table>
2996<tr>
2997 <td>CpuAcc
2998 <td>
2999 <ul>
3000 <li>All
3001 </ul>
3002 <td>
3003 <table>
3004 <tr><th>
3005 <tr><td>QASYMMU8
3006 <tr><td>QASYMMS8
3007 <tr><td>FLOAT16
3008 <tr><td>FLOAT32
3009 </table>
3010<tr>
3011 <td>GpuAcc
3012 <td>
3013 <ul>
3014 <li>All
3015 </ul>
3016 <td>
3017 <table>
3018 <tr><th>
3019 <tr><td>QASYMMU8
3020 <tr><td>QASYMMS8
3021 <tr><td>FLOAT16
3022 <tr><td>FLOAT32
3023 </table>
3024<tr>
3025 <td rowspan="3">SpaceToBatchNdLayer
3026 <td rowspan="3" style="width:200px;"> Layer to divide spatial dimensions of the tensor into a grid of blocks and interleaves these blocks with the batch dimension.
    <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_SPACE_TO_BATCH_ND
      </ul>
    <td>CpuRef
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>BFLOAT16
       <tr><td>FLOAT16
       <tr><td>FLOAT32
       <tr><td>QASYMMS8
       <tr><td>QASYMMU8
       <tr><td>QSYMMS16
      </table>
<tr>
    <td>CpuAcc
    <td>
      <ul>
       <li>NHWC
       <li>NCHW
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>All
      </table>
<tr>
    <td>GpuAcc
    <td>
      <ul>
       <li>NHWC
       <li>NCHW
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>All
      </table>
<tr>
    <td rowspan="3">SpaceToDepthLayer
    <td rowspan="3" style="width:200px;"> Layer to rearrange blocks of spatial data into depth.
    <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_SPACE_TO_DEPTH
      </ul>
    <td>CpuRef
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>BFLOAT16
       <tr><td>FLOAT16
       <tr><td>FLOAT32
       <tr><td>QASYMMS8
       <tr><td>QASYMMU8
       <tr><td>QSYMMS16
      </table>
<tr>
    <td>CpuAcc
    <td>
      <ul>
       <li>NHWC
       <li>NCHW
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>All
      </table>
<tr>
    <td>GpuAcc
    <td>
      <ul>
       <li>NHWC
       <li>NCHW
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>All
      </table>
<tr>
    <td rowspan="3">SplitterLayer
    <td rowspan="3" style="width:200px;"> Layer to split a tensor along a given axis.
    <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_SPLIT
      </ul>
    <td>CpuRef
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>BFLOAT16
       <tr><td>FLOAT16
       <tr><td>FLOAT32
       <tr><td>QASYMMS8
       <tr><td>QASYMMU8
       <tr><td>QSYMMS16
      </table>
<tr>
    <td>CpuAcc
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>All
      </table>
<tr>
    <td>GpuAcc
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>All
      </table>
<tr>
    <td rowspan="3">StackLayer
    <td rowspan="3" style="width:200px;"> Layer to stack tensors along an axis.
    <td rowspan="3">
      <ul>
       <li>N/A
      </ul>
    <td>CpuRef
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>BFLOAT16
       <tr><td>FLOAT16
       <tr><td>FLOAT32
       <tr><td>QASYMMS8
       <tr><td>QASYMMU8
       <tr><td>QSYMMS16
      </table>
<tr>
    <td>CpuAcc
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>All
      </table>
<tr>
    <td>GpuAcc
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>All
      </table>
<tr>
    <td rowspan="1">StandInLayer
    <td rowspan="1" style="width:200px;"> A layer to represent "unknown" or "unsupported" operations in the input graph. It has a configurable number of input and output slots and an optional name.
    <td rowspan="1">
      <ul>
       <li>N/A
      </ul>
    <td>N/A
    <td>N/A
    <td>N/A
<tr>
    <td rowspan="3">StridedSliceLayer
    <td rowspan="3" style="width:200px;"> Layer to extract a strided slice of a tensor.
    <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_STRIDED_SLICE
      </ul>
    <td>CpuRef
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>BFLOAT16
       <tr><td>FLOAT32
       <tr><td>QASYMMS8
       <tr><td>QASYMMU8
       <tr><td>QSYMMS16
      </table>
<tr>
    <td>CpuAcc
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>All
      </table>
<tr>
    <td>GpuAcc
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>All
      </table>
<tr>
    <td rowspan="3">SubtractionLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an elementwise subtraction of two tensors.
    <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_SUB
      </ul>
    <td>CpuRef
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>BFLOAT16
       <tr><td>FLOAT16
       <tr><td>FLOAT32
       <tr><td>QASYMMS8
       <tr><td>QASYMMU8
       <tr><td>QSYMMS16
       <tr><td>SIGNED32
      </table>
<tr>
    <td>CpuAcc
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>QASYMMU8
       <tr><td>QASYMMS8
       <tr><td>QSYMMS16
       <tr><td>SIGNED32
       <tr><td>FLOAT16
       <tr><td>FLOAT32
      </table>
<tr>
    <td>GpuAcc
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>QASYMMU8
       <tr><td>QASYMMS8
       <tr><td>QSYMMS16
       <tr><td>SIGNED32
       <tr><td>FLOAT16
       <tr><td>FLOAT32
      </table>
<tr>
    <td rowspan="3">TileLayer
    <td rowspan="3" style="width:200px;"> Layer to construct a tensor by repeating a given tensor in tiles.
    <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_TILE
      </ul>
    <td>CpuRef
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>FLOAT16
       <tr><td>FLOAT32
       <tr><td>QASYMMS8
       <tr><td>QASYMMU8
       <tr><td>QSYMMS8
       <tr><td>QSYMMS16
       <tr><td>SIGNED32
      </table>
<tr>
    <td>CpuAcc
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>FLOAT16
       <tr><td>FLOAT32
       <tr><td>QASYMMS8
       <tr><td>QASYMMU8
       <tr><td>QSYMMS8
       <tr><td>QSYMMS16
       <tr><td>SIGNED32
      </table>
<tr>
    <td>GpuAcc
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>FLOAT16
       <tr><td>FLOAT32
       <tr><td>QASYMMS8
       <tr><td>QASYMMU8
       <tr><td>QSYMMS8
       <tr><td>QSYMMS16
       <tr><td>SIGNED32
      </table>
<tr>
    <td rowspan="3">TransposeConvolution2dLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a 2D transpose convolution (deconvolution) operation.
    <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_TRANSPOSE_CONV_2D
      </ul>
    <td>CpuRef
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>BFLOAT16
       <tr><td>FLOAT16
       <tr><td>FLOAT32
       <tr><td>QASYMMS8
       <tr><td>QASYMMU8
       <tr><td>QSYMMS8
       <tr><td>QSYMMS16
      </table>
<tr>
    <td>CpuAcc
    <td>
      <ul>
       <li>NHWC
       <li>NCHW
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>SIGNED32
       <tr><td>FLOAT16
       <tr><td>FLOAT32
       <tr><td>QASYMMU8
       <tr><td>QASYMMS8
       <tr><td>QUANTIZEDSYMM8PERAXIS
      </table>
<tr>
    <td>GpuAcc
    <td>
      <ul>
       <li>NHWC
       <li>NCHW
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>SIGNED32
       <tr><td>FLOAT16
       <tr><td>FLOAT32
       <tr><td>QASYMMU8
       <tr><td>QASYMMS8
       <tr><td>QUANTIZEDSYMM8PERAXIS
      </table>
<tr>
    <td rowspan="3">TransposeLayer
    <td rowspan="3" style="width:200px;"> Layer to transpose a tensor.
    <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_TRANSPOSE
      </ul>
    <td>CpuRef
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>BFLOAT16
       <tr><td>FLOAT16
       <tr><td>FLOAT32
       <tr><td>QASYMMS8
       <tr><td>QASYMMU8
       <tr><td>QSYMMS16
      </table>
<tr>
    <td>CpuAcc
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>All
      </table>
<tr>
    <td>GpuAcc
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>All
      </table>
<tr>
3471 <td rowspan="3">UnidirectionalSquenceLstmLayer
Narumol Prangnawaratbd575b22021-08-31 16:53:54 +01003472 <td rowspan="3" style="width:200px;"> Layer to perform unidirectional sequence LSTM operation.
    <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM
      </ul>
    <td>CpuRef
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>Input Types
       <tr><td>FLOAT32
      </table>
      <table>
       <tr><th>Weight Types
       <tr><td>FLOAT32
       <tr><td>QASYMMS8
      </table>
<tr>
    <td>CpuAcc
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>Input Types
       <tr><td>FLOAT32
      </table>
      <table>
       <tr><th>Weight Types
       <tr><td>FLOAT32
      </table>
<tr>
    <td>GpuAcc
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>Input Types
       <tr><td>FLOAT32
      </table>
      <table>
       <tr><th>Weight Types
       <tr><td>FLOAT32
      </table>
<tr>
    <td rowspan="3">UnmapLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an unmap operation on a tensor.
    <td rowspan="3">
      <ul>
       <li>N/A
      </ul>
    <td>CpuRef
    <td>
      <ul>
       <li>All
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>All
      </table>
<tr>
    <td>CpuAcc
    <td>
      <ul>
       <li>NHWC
       <li>NCHW
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>All
      </table>
<tr>
    <td>GpuAcc
    <td>
      <ul>
       <li>NHWC
       <li>NCHW
      </ul>
    <td>
      <table>
       <tr><th>
       <tr><td>All
      </table>
</table>
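
As a minimal usage sketch (not canonical API documentation), the snippet below builds a network containing a single SoftmaxLayer from the table and runs it on the CpuRef backend. It assumes a recent Arm NN release; descriptor fields and runtime calls such as TensorInfo::SetConstant can differ slightly between versions.

@code{.cpp}
// Minimal sketch: a one-layer network containing SoftmaxLayer, run on
// the CpuRef backend (which, per the table, supports FLOAT32).
#include <armnn/ArmNN.hpp>

#include <iostream>
#include <vector>

int main()
{
    using namespace armnn;

    // SoftmaxDescriptor exposes the beta scaling factor and the axis
    // the softmax is computed over.
    SoftmaxDescriptor softmaxDesc;
    softmaxDesc.m_Beta = 1.0f;
    softmaxDesc.m_Axis = 1;

    INetworkPtr network = INetwork::Create();
    IConnectableLayer* input   = network->AddInputLayer(0, "input");
    IConnectableLayer* softmax = network->AddSoftmaxLayer(softmaxDesc, "softmax");
    IConnectableLayer* output  = network->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Both tensors are 1x4 FLOAT32.
    TensorInfo tensorInfo(TensorShape({1, 4}), DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    softmax->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    // Optimize for the reference backend and load it into the runtime.
    // Swapping Compute::CpuRef for Compute::CpuAcc or Compute::GpuAcc
    // selects the accelerated backends, subject to the data-type
    // support listed in the table.
    IRuntime::CreationOptions options;
    IRuntimePtr runtime = IRuntime::Create(options);
    IOptimizedNetworkPtr optNet = Optimize(*network, {Compute::CpuRef},
                                           runtime->GetDeviceSpec());
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    std::vector<float> inputData{1.0f, 2.0f, 3.0f, 4.0f};
    std::vector<float> outputData(4);

    // Recent Arm NN releases require the TensorInfo used for an input
    // ConstTensor to be flagged as constant.
    TensorInfo inputInfo = runtime->GetInputTensorInfo(netId, 0);
    inputInfo.SetConstant(true);

    InputTensors  inputTensors {{0, ConstTensor(inputInfo, inputData.data())}};
    OutputTensors outputTensors{{0, Tensor(runtime->GetOutputTensorInfo(netId, 0),
                                           outputData.data())}};

    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    for (float v : outputData) { std::cout << v << " "; }
    std::cout << std::endl;
    return 0;
}
@endcode

The same pattern applies to the other layers in the table: populate the layer's descriptor and add it with the corresponding INetwork::Add*Layer call. For example, a sketch of configuring StridedSliceLayer on a network object like the one above, again assuming recent descriptor fields:

@code{.cpp}
// Illustrative only: take every second element of a length-8 1-D tensor.
armnn::StridedSliceDescriptor stridedDesc;
stridedDesc.m_Begin  = {0};
stridedDesc.m_End    = {8};
stridedDesc.m_Stride = {2};
armnn::IConnectableLayer* stridedSlice =
    network->AddStridedSliceLayer(stridedDesc, "strided_slice");
@endcode

Before targeting Compute::CpuAcc or Compute::GpuAcc, check the backend, data-layout and data-type columns above for each layer in the network.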

*/
} // namespace armnn