blob: d7b2bb5bb8f955bdd0a71789edce83af7da3b9d5 [file] [log] [blame]
Teresa Charlin1fe6c812022-11-01 15:59:50 +00001/// Copyright (c) 2021, 2023 ARM Limited and Contributors. All rights reserved.
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01002///
3/// SPDX-License-Identifier: MIT
4///
5
6namespace armnn
7{
8/**
9@page operator_list Arm NN Operators
10
11@tableofcontents
12
13@section S5_1_operator_list Arm NN Operators
14
15Arm NN supports operators that are listed in below table.
16
17Arm NN supports a wide list of data-types.
18The main data-types that the Machine Learning functions support are the following:
19 <ul>
20 <li><b>BFLOAT16:</b> 16-bit non-standard brain floating point
21 <li><b>QASYMMU8:</b> 8-bit unsigned asymmetric quantized
22 <li><b>QASYMMS8:</b> 8-bit signed asymmetric quantized
23 <li><b>QUANTIZEDSYMM8PERAXIS:</b> 8-bit signed symmetric quantized
Cathal Corbettb85113e2022-02-22 11:51:43 +000024 <li><b>QSYMMS8:</b> 8-bit signed symmetric quantized
25 <li><b>QSYMMS16:</b> 16-bit signed symmetric quantized
Sadik Armagan1a9c9f62021-08-05 09:25:15 +010026 <li><b>FLOAT32:</b> 32-bit single precision floating point
27 <li><b>FLOAT16:</b> 16-bit half precision floating point
28 <li><b>SIGNED32:</b> 32-bit signed integer
29 <li><b>BOOLEAN:</b> 8-bit unsigned char
30 <li><b>All:</b> Agnostic to any specific data type
31 </ul>
32
33Arm NN supports the following data layouts (fast changing dimension from right to left):
34 <ul>
35 <li><b>NHWC:</b> Layout where channels are in the fastest changing dimension
36 <li><b>NCHW:</b> Layout where width is in the fastest changing dimension
37 <li><b>All:</b> Agnostic to any specific data layout
38 </ul>
39where N = batches, C = channels, H = height, W = width
40
41<table>
42<caption id="multi_row"></caption>
43<tr>
44 <th>Operator
45 <th>Description
46 <th>Equivalent Android NNAPI Operator
47 <th>Backends
48 <th>Data Layouts
49 <th>Data Types
50<tr>
51 <td rowspan="3">AbsLayer
52 <td rowspan="3"> Layer to perform absolute operation.
53 <td rowspan="3">
54 <ul>
55 <li>ANEURALNETWORKS_ABS
56 </ul>
57 <td>CpuRef
58 <td>
59 <ul>
60 <li>All
61 </ul>
62 <td>
63 <table>
64 <tr><th>
65 <tr><td>BFLOAT16
66 <tr><td>FLOAT16
67 <tr><td>FLOAT32
68 <tr><td>QASYMMS8
69 <tr><td>QASYMMU8
70 <tr><td>QSYMMS16
71 <tr><td>SIGNED32
72 </table>
73<tr>
74 <td>CpuAcc
75 <td>
76 <ul>
77 <li>All
78 </ul>
79 <td>
80 <table>
81 <tr><th>
82 <tr><td>FLOAT16
83 <tr><td>FLOAT32
84 <tr><td>SIGNED32
85 </table>
86<tr>
87 <td>GpuAcc
88 <td>
89 <ul>
90 <li>All
91 </ul>
92 <td>
93 <table>
94 <tr><th>
95 <tr><td>FLOAT16
96 <tr><td>FLOAT32
97 </table>
98<tr>
99 <td rowspan="3">ActivationLayer
100 <td rowspan="3" style="width:200px;"> Layer to simulate an activation layer with the specified activation function.
101 <td rowspan="3">
102 <ul>
103 <li>ANEURALNETWORKS_ABS
104 <li>ANEURALNETWORKS_ELU
105 <li>ANEURALNETWORKS_HARD_SWISH
106 <li>ANEURALNETWORKS_LOGISTIC
107 <li>ANEURALNETWORKS_PRELU
108 <li>ANEURALNETWORKS_RELU
109 <li>ANEURALNETWORKS_RELU1
110 <li>ANEURALNETWORKS_RELU6
111 <li>ANEURALNETWORKS_SQRT
112 <li>ANEURALNETWORKS_TANH
113 </ul>
114 <td>CpuRef
115 <td>
116 <ul>
117 <li>All
118 </ul>
119 <td>
120 <table>
121 <tr><th>
122 <tr><td>BFLOAT16
123 <tr><td>FLOAT16
124 <tr><td>FLOAT32
125 <tr><td>QASYMMS8
126 <tr><td>QASYMMU8
127 <tr><td>QSYMMS16
128 </table>
129<tr>
130 <td>CpuAcc
131 <td>
132 <ul>
133 <li>All
134 </ul>
135 <td>
136 <table>
137 <tr><th>
138 <tr><td>QASYMMU8
139 <tr><td>QASYMMS8
140 <tr><td>QSYMMS16
141 <tr><td>FLOAT16
142 <tr><td>FLOAT32
143 </table>
144<tr>
145 <td>GpuAcc
146 <td>
147 <ul>
148 <li>All
149 </ul>
150 <td>
151 <table>
152 <tr><th>
153 <tr><td>QASYMMU8
154 <tr><td>QASYMMS8
155 <tr><td>QSYMMS16
156 <tr><td>FLOAT16
157 <tr><td>FLOAT32
158 </table>
159<tr>
160 <td rowspan="3">AdditionLayer
161 <td rowspan="3" style="width:200px;"> Layer to add 2 tensors.
162 <td rowspan="3">
163 <ul>
164 <li>ANEURALNETWORKS_ADD
165 </ul>
166 <td>CpuRef
167 <td>
168 <ul>
169 <li>All
170 </ul>
171 <td>
172 <table>
173 <tr><th>
174 <tr><td>BFLOAT16
175 <tr><td>FLOAT16
176 <tr><td>FLOAT32
177 <tr><td>QASYMMS8
178 <tr><td>QASYMMU8
179 <tr><td>QSYMMS16
180 <tr><td>SIGNED32
181 </table>
182<tr>
183 <td>CpuAcc
184 <td>
185 <ul>
186 <li>All
187 </ul>
188 <td>
189 <table>
190 <tr><th>
191 <tr><td>QASYMMU8
192 <tr><td>QASYMMS8
193 <tr><td>QSYMMS16
194 <tr><td>SIGNED32
195 <tr><td>FLOAT16
196 <tr><td>FLOAT32
197 </table>
198<tr>
199 <td>GpuAcc
200 <td>
201 <ul>
202 <li>All
203 </ul>
204 <td>
205 <table>
206 <tr><th>
207 <tr><td>QASYMMU8
208 <tr><td>QASYMMS8
209 <tr><td>QSYMMS16
210 <tr><td>SIGNED32
211 <tr><td>FLOAT16
212 <tr><td>FLOAT32
213 </table>
214<tr>
215 <td rowspan="3">ArgMinMaxLayer
216 <td rowspan="3" style="width:200px;"> Layer to calculate the index of the minimum or maximum values in a tensor
217 based on an axis.
218 <td rowspan="3">
219 <ul>
220 <li>ANEURALNETWORKS_ARGMAX
221 <li>ANEURALNETWORKS_ARGMIN
222 </ul>
223 <td>CpuRef
224 <td>
225 <ul>
226 <li>All
227 </ul>
228 <td>
229 <table>
230 <tr><th>
231 <tr><td>BFLOAT16
232 <tr><td>FLOAT16
233 <tr><td>FLOAT32
234 <tr><td>QASYMMS8
235 <tr><td>QASYMMU8
236 <tr><td>QSYMMS16
237 <tr><td>SIGNED32
238 <tr><td>SIGNED64
239 </table>
240<tr>
241 <td>CpuAcc
242 <td>
243 <ul>
244 <li>All
245 </ul>
246 <td>
247 <table>
248 <tr><th>
249 <tr><td>QASYMMU8
250 <tr><td>QASYMMS8
251 <tr><td>SIGNED32
252 <tr><td>FLOAT16
253 <tr><td>FLOAT32
254 </table>
255<tr>
256 <td>GpuAcc
257 <td>
258 <ul>
259 <li>All
260 </ul>
261 <td>
262 <table>
263 <tr><th>
264 <tr><td>QASYMMU8
265 <tr><td>QASYMMS8
266 <tr><td>SIGNED32
267 <tr><td>FLOAT16
268 <tr><td>FLOAT32
269 </table>
270<tr>
Samuel Yap6b478092022-07-06 15:36:03 +0100271 <td rowspan="3">BatchMatMulLayer
272 <td rowspan="3" style="width:200px;"> Layer to perform batch matrix multiplication.
273 <td rowspan="3">
274 <ul>
275 <li>N/A
276 </ul>
277 <td>CpuRef
278 <td>
279 <ul>
280 <li>All
281 </ul>
282 <td>
283 <table>
284 <tr><th>
285 <tr><td>BFLOAT16
286 <tr><td>FLOAT16
287 <tr><td>FLOAT32
288 <tr><td>QASYMMS8
289 <tr><td>QASYMMU8
290 <tr><td>QSYMMS16
291 </table>
292<tr>
293 <td>CpuAcc
294 <td>
295 <ul>
Teresa Charlin0f86ecf2022-10-13 15:47:08 +0100296 <li>All
Samuel Yap6b478092022-07-06 15:36:03 +0100297 </ul>
298 <td>
Teresa Charlin0f86ecf2022-10-13 15:47:08 +0100299 <table>
300 <tr><th>
301 <tr><td>FLOAT32
Teresa Charlin1fe6c812022-11-01 15:59:50 +0000302 <tr><td>QASYMMS8
Teresa Charlin0f86ecf2022-10-13 15:47:08 +0100303 </table>
Samuel Yap6b478092022-07-06 15:36:03 +0100304<tr>
305 <td>GpuAcc
306 <td>
307 <ul>
Teresa Charlin94916a52022-10-19 08:48:07 +0100308 <li>All
Samuel Yap6b478092022-07-06 15:36:03 +0100309 </ul>
310 <td>
Teresa Charlin94916a52022-10-19 08:48:07 +0100311 <table>
312 <tr><th>
313 <tr><td>FLOAT32
Teresa Charlin97a3aef2023-01-10 10:32:51 +0000314 <tr><td>QASYMMS8
Teresa Charlin94916a52022-10-19 08:48:07 +0100315 </table>
Samuel Yap6b478092022-07-06 15:36:03 +0100316<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +0100317 <td rowspan="3">BatchNormalizationLayer
318 <td rowspan="3" style="width:200px;"> Layer to perform batch normalization.
319 <td rowspan="3">
320 <ul>
321 <li>N/A
322 </ul>
323 <td>CpuRef
324 <td>
325 <ul>
326 <li>All
327 </ul>
328 <td>
329 <table>
330 <tr><th>
331 <tr><td>BFLOAT16
332 <tr><td>FLOAT16
333 <tr><td>FLOAT32
334 <tr><td>QASYMMS8
335 <tr><td>QASYMMU8
336 <tr><td>QSYMMS16
337 </table>
338<tr>
339 <td>CpuAcc
340 <td>
341 <ul>
342 <li>NHWC
343 <li>NCHW
344 </ul>
345 <td>
346 <table>
347 <tr><th>
348 <tr><td>FLOAT32
349 <tr><td>FLOAT16
350 </table>
351<tr>
352 <td>GpuAcc
353 <td>
354 <ul>
355 <li>NHWC
356 <li>NCHW
357 </ul>
358 <td>
359 <table>
360 <tr><th>
361 <tr><td>FLOAT32
362 <tr><td>FLOAT16
363 </table>
364<tr>
365 <td rowspan="3">BatchToSpaceNdLayer
366 <td rowspan="3" style="width:200px;"> Layer to perform a batch to space transformation.
367 <td rowspan="3">
368 <ul>
369 <li>ANEURALNETWORKS_BATCH_TO_SPACE_ND
370 </ul>
371 <td>CpuRef
372 <td>
373 <ul>
374 <li>All
375 </ul>
376 <td>
377 <table>
378 <tr><th>
379 <tr><td>BFLOAT16
380 <tr><td>FLOAT16
381 <tr><td>FLOAT32
382 <tr><td>QASYMMS8
383 <tr><td>QASYMMU8
384 <tr><td>QSYMMS16
385 </table>
386<tr>
387 <td>CpuAcc
388 <td>
389 <ul>
390 <li>NHWC
391 <li>NCHW
392 </ul>
393 <td>
394 <table>
395 <tr><th>
396 <tr><td>All
397 </table>
398<tr>
399 <td>GpuAcc
400 <td>
401 <ul>
402 <li>NHWC
403 <li>NCHW
404 </ul>
405 <td>
406 <table>
407 <tr><th>
408 <tr><td>All
409 </table>
410<tr>
411 <td rowspan="3">CastLayer
412 <td rowspan="3" style="width:200px;"> Layer to cast a tensor to a type.
413 <td rowspan="3">
414 <ul>
415 <li>ANEURALNETWORKS_CAST
416 </ul>
417 <td>CpuRef
418 <td>
419 <ul>
420 <li>All
421 </ul>
422 <td>
423 <table>
424 <tr><th>
425 <tr><td>BFLOAT16
426 <tr><td>FLOAT16
427 <tr><td>FLOAT32
428 <tr><td>QSYMMS8
429 <tr><td>QASYMMS8
430 <tr><td>QASYMMU8
431 <tr><td>QSYMMS16
432 <tr><td>SIGNED32
433 </table>
434<tr>
435 <td>CpuAcc
436 <td>
437 <ul>
438 <li>All
439 </ul>
440 <td>
441 <table>
442 <tr><th>
443 <tr><td>QASYMMS8
444 <tr><td>QASYMMU8
445 <tr><td>FLOAT16
446 <tr><td>SIGNED32
447 <tr><td>FLOAT32
448 </table>
449<tr>
450 <td>GpuAcc
451 <td>
452 <ul>
453 <li>All
454 </ul>
455 <td>
456 <table>
457 <tr><th>
458 <tr><td>QASYMMS8
459 <tr><td>QASYMMU8
460 <tr><td>SIGNED32
461 <tr><td>FLOAT16
462 <tr><td>FLOAT32
463 </table>
464<tr>
Teresa Charlincd203852021-09-24 18:15:39 +0100465 <td rowspan="3">ChannelShuffleLayer
466 <td rowspan="3" style="width:200px;"> Layer to reorganize the channels of a tensor.
467 <td rowspan="3">
468 <ul>
469 <li>ANEURALNETWORKS_CHANNEL_SHUFFLE
470 </ul>
471 <td>CpuRef
472 <td>
473 <ul>
474 <li>All
475 </ul>
476 <td>
477 <table>
478 <tr><th>
479 <tr><td>FLOAT16
480 <tr><td>FLOAT32
481 <tr><td>QSYMMS8
482 <tr><td>QASYMMS8
483 <tr><td>QASYMMU8
484 </table>
485<tr>
486 <td>CpuAcc
487 <td>
488 <ul>
489 <li>All
490 </ul>
491 <td>
492 <table>
493 <tr><th>
494 <tr><td>QASYMMS8
495 <tr><td>QASYMMU8
496 <tr><td>FLOAT16
497 <tr><td>FLOAT32
498 </table>
499<tr>
500 <td>GpuAcc
501 <td>
502 <ul>
503 <li>All
504 </ul>
505 <td>
506 <table>
507 <tr><th>
508 <tr><td>QASYMMS8
509 <tr><td>QASYMMU8
510 <tr><td>FLOAT16
511 <tr><td>FLOAT32
512 </table>
513<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +0100514 <td rowspan="3">ComparisonLayer
515 <td rowspan="3" style="width:200px;"> Layer to compare 2 tensors.
516 <td rowspan="3">
517 <ul>
518 <li>ANEURALNETWORKS_EQUAL
519 <li>ANEURALNETWORKS_GREATER
520 <li>ANEURALNETWORKS_GREATER_EQUAL
521 <li>ANEURALNETWORKS_LESS
522 <li>ANEURALNETWORKS_LESS_EQUAL
523 <li>ANEURALNETWORKS_NOT_EQUAL
524 </ul>
525 <td>CpuRef
526 <td>
527 <ul>
528 <li>All
529 </ul>
530 <td>
531 <table>
532 <tr><th>
533 <tr><td>BFLOAT16
534 <tr><td>FLOAT16
535 <tr><td>FLOAT32
536 <tr><td>BOOLEAN
537 <tr><td>QASYMMS8
538 <tr><td>QASYMMU8
539 <tr><td>QSYMMS16
540 <tr><td>SIGNED32
541 </table>
542<tr>
543 <td>CpuAcc
544 <td>
545 <ul>
546 <li>All
547 </ul>
548 <td>
549 <table>
550 <tr><th>
551 <tr><td>All
552 </table>
553<tr>
554 <td>GpuAcc
555 <td>
556 <ul>
557 <li>All
558 </ul>
559 <td>
560 <table>
561 <tr><th>
562 <tr><td>All
563 </table>
564<tr>
565 <td rowspan="3">ConcatLayer
566 <td rowspan="3" style="width:200px;"> Layer to concatenate tensors along a given axis.
567 <td rowspan="3">
568 <ul>
569 <li>ANEURALNETWORKS_CONCATENATION
570 </ul>
571 <td>CpuRef
572 <td>
573 <ul>
574 <li>All
575 </ul>
576 <td>
577 <table>
578 <tr><th>
579 <tr><td>BFLOAT16
580 <tr><td>FLOAT16
581 <tr><td>FLOAT32
582 <tr><td>QASYMMS8
583 <tr><td>QASYMMU8
584 <tr><td>QSYMMS16
585 </table>
586<tr>
587 <td>CpuAcc
588 <td>
589 <ul>
590 <li>All
591 </ul>
592 <td>
593 <table>
594 <tr><th>
595 <tr><td>QASYMMU8
596 <tr><td>QASYMMS8
597 <tr><td>FLOAT16
598 <tr><td>FLOAT32
599 </table>
600<tr>
601 <td>GpuAcc
602 <td>
603 <ul>
604 <li>All
605 </ul>
606 <td>
607 <table>
608 <tr><th>
609 <tr><td>QASYMMU8
610 <tr><td>QASYMMS8
611 <tr><td>FLOAT16
612 <tr><td>FLOAT32
613 </table>
614<tr>
615 <td rowspan="3">ConstantLayer
616 <td rowspan="3" style="width:200px;"> Layer to provide a constant tensor.
617 <td rowspan="3">
618 <ul>
619 <li>N/A
620 </ul>
621 <td>CpuRef
622 <td>
623 <ul>
624 <li>All
625 </ul>
626 <td>
627 <table>
628 <tr><th>
629 <tr><td>BFLOAT16
630 <tr><td>FLOAT16
631 <tr><td>FLOAT32
632 <tr><td>QASYMMS8
633 <tr><td>QASYMMU8
634 <tr><td>QSYMMS8
635 <tr><td>QSYMMS16
636 <tr><td>SIGNED32
637 </table>
638<tr>
639 <td>CpuAcc
640 <td>
641 <ul>
642 <li>All
643 </ul>
644 <td>
645 <table>
646 <tr><th>
647 <tr><td>All
648 </table>
649<tr>
650 <td>GpuAcc
651 <td>
652 <ul>
653 <li>All
654 </ul>
655 <td>
656 <table>
657 <tr><th>
658 <tr><td>All
659 </table>
660<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +0100661 <td rowspan="3">ConvertFp16ToFp32Layer
662 <td rowspan="3" style="width:200px;"> Layer to convert Float16 tensor to Float32 tensor.
663 <td rowspan="3">
664 <ul>
665 <li>N/A
666 </ul>
667 <td>CpuRef
668 <td>
669 <ul>
670 <li>All
671 </ul>
672 <td>
673 <table>
674 <tr><th>
675 <tr><td>FLOAT16
676 <tr><td>FLOAT32
677 </table>
678<tr>
679 <td>CpuAcc
680 <td>
681 <ul>
682 <li>All
683 </ul>
684 <td>
685 <table>
686 <tr><th>
687 <tr><td>FLOAT16
688 <tr><td>FLOAT32
689 </table>
690<tr>
691 <td>GpuAcc
692 <td>
693 <ul>
694 <li>All
695 </ul>
696 <td>
697 <table>
698 <tr><th>
699 <tr><td>FLOAT16
700 <tr><td>FLOAT32
701 </table>
702<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +0100703 <td rowspan="3">ConvertFp32ToFp16Layer
704 <td rowspan="3" style="width:200px;"> Layer to convert Float32 tensor to Float16 tensor.
705 <td rowspan="3">
706 <ul>
707 <li>N/A
708 </ul>
709 <td>CpuRef
710 <td>
711 <ul>
712 <li>All
713 </ul>
714 <td>
715 <table>
716 <tr><th>
717 <tr><td>FLOAT16
718 <tr><td>FLOAT32
719 </table>
720<tr>
721 <td>CpuAcc
722 <td>
723 <ul>
724 <li>All
725 </ul>
726 <td>
727 <table>
728 <tr><th>
729 <tr><td>FLOAT16
730 <tr><td>FLOAT32
731 </table>
732<tr>
733 <td>GpuAcc
734 <td>
735 <ul>
736 <li>All
737 </ul>
738 <td>
739 <table>
740 <tr><th>
741 <tr><td>FLOAT16
742 <tr><td>FLOAT32
743 </table>
744<tr>
745 <td rowspan="3">Convolution2dLayer
746 <td rowspan="3" style="width:200px;"> Layer to compute a convolution operation.
747 <td rowspan="3">
748 <ul>
749 <li>ANEURALNETWORKS_CONV_2D
750 <li>ANEURALNETWORKS_GROUPED_CONV_2D
751 </ul>
752 <td>CpuRef
753 <td>
754 <ul>
755 <li>All
756 </ul>
757 <td>
758 <table>
759 <tr><th>
760 <tr><td>BFLOAT16
761 <tr><td>FLOAT16
762 <tr><td>FLOAT32
763 <tr><td>QASYMMS8
764 <tr><td>QASYMMU8
765 <tr><td>QSYMMS16
766 </table>
767<tr>
768 <td>CpuAcc
769 <td>
770 <ul>
771 <li>NHWC
772 <li>NCHW
773 </ul>
774 <td>
775 <table>
776 <tr><th>
777 <tr><td>SIGNED32
778 <tr><td>FLOAT16
779 <tr><td>FLOAT32
780 <tr><td>QASYMMU8
781 <tr><td>QASYMMS8
782 <tr><td>QUANTIZEDSYMM8PERAXIS
783 </table>
784<tr>
785 <td>GpuAcc
786 <td>
787 <ul>
788 <li>NHWC
789 <li>NCHW
790 </ul>
791 <td>
792 <table>
793 <tr><th>
794 <tr><td>SIGNED32
795 <tr><td>FLOAT16
796 <tr><td>FLOAT32
797 <tr><td>QASYMMU8
798 <tr><td>QASYMMS8
799 <tr><td>QUANTIZEDSYMM8PERAXIS
800 </table>
801<tr>
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100802 <td rowspan="3">Convolution3dLayer
803 <td rowspan="3" style="width:200px;"> Layer to compute a 3D convolution operation.
804 <td rowspan="3">
805 <ul>
806 <li>N/A
807 </ul>
808 <td>CpuRef
809 <td>
810 <ul>
811 <li>NDHWC
812 </ul>
813 <td>
814 <table>
815 <tr><th>
816 <tr><td>BFLOAT16
817 <tr><td>FLOAT16
818 <tr><td>FLOAT32
819 <tr><td>QASYMMS8
820 <tr><td>QASYMMU8
821 <tr><td>QSYMMS8
822 <tr><td>QSYMMS16
823 </table>
824<tr>
825 <td>CpuAcc
826 <td>
827 <ul>
828 <li>N/A
829 </ul>
830 <td>
831 <ul>
832 <li>N/A
833 </ul>
834<tr>
835 <td>GpuAcc
836 <td>
837 <ul>
838 <li>N/A
839 </ul>
840 <td>
841 <ul>
842 <li>N/A
843 </ul>
844<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +0100845 <td rowspan="1">DebugLayer
846 <td rowspan="1" style="width:200px;"> Layer to print out inter layer tensor information.
847 <td rowspan="1">
848 <ul>
849 <li>N/A
850 </ul>
851 <td>CpuRef
852 <td>
853 <ul>
854 <li>All
855 </ul>
856 <td>
857 <table>
858 <tr><th>
859 <tr><td>BFLOAT16
860 <tr><td>FLOAT16
861 <tr><td>FLOAT32
862 <tr><td>QASYMMS8
863 <tr><td>QASYMMU8
864 <tr><td>QSYMMS8
865 <tr><td>QSYMMS16
866 <tr><td>SIGNED32
867 </table>
868<tr>
869 <td rowspan="3">DepthToSpaceLayer
870 <td rowspan="3" style="width:200px;"> Layer to perform Depth to Space transformation.
871 <td rowspan="3">
872 <ul>
873 <li>ANEURALNETWORKS_DEPTH_TO_SPACE
874 </ul>
875 <td>CpuRef
876 <td>
877 <ul>
878 <li>All
879 </ul>
880 <td>
881 <table>
882 <tr><th>
883 <tr><td>BFLOAT16
884 <tr><td>FLOAT16
885 <tr><td>FLOAT32
886 <tr><td>QASYMMS8
887 <tr><td>QASYMMU8
888 <tr><td>QSYMMS16
889 </table>
890<tr>
891 <td>CpuAcc
892 <td>
893 <ul>
894 <li>NHWC
895 <li>NCHW
896 </ul>
897 <td>
898 <table>
899 <tr><th>
900 <tr><td>All
901 </table>
902<tr>
903 <td>GpuAcc
904 <td>
905 <ul>
906 <li>NHWC
907 <li>NCHW
908 </ul>
909 <td>
910 <table>
911 <tr><th>
912 <tr><td>All
913 </table>
914<tr>
915 <td rowspan="3">DepthwiseConvolution2dLayer
916 <td rowspan="3" style="width:200px;"> Layer to compute a deconvolution or transpose convolution.
917 <td rowspan="3">
918 <ul>
919 <li>ANEURALNETWORKS_DEPTHWISE_CONV_2D
920 </ul>
921 <td>CpuRef
922 <td>
923 <ul>
924 <li>All
925 </ul>
926 <td>
927 <table>
928 <tr><th>
929 <tr><td>BFLOAT16
930 <tr><td>FLOAT16
931 <tr><td>FLOAT32
932 <tr><td>QASYMMS8
933 <tr><td>QASYMMU8
934 <tr><td>QSYMMS8
935 <tr><td>QSYMMS16
936 </table>
937<tr>
938 <td>CpuAcc
939 <td>
940 <ul>
941 <li>NHWC
942 <li>NCHW
943 </ul>
944 <td>
945 <table>
946 <tr><th>
947 <tr><td>FLOAT16
948 <tr><td>FLOAT32
949 <tr><td>SIGNED32
950 <tr><td>QASYMMU8
951 <tr><td>QASYMMS8
952 <tr><td>QUANTIZEDSYMM8PERAXIS
953 </table>
954<tr>
955 <td>GpuAcc
956 <td>
957 <ul>
958 <li>NHWC
959 <li>NCHW
960 </ul>
961 <td>
962 <table>
963 <tr><th>
964 <tr><td>FLOAT16
965 <tr><td>FLOAT32
966 <tr><td>SIGNED32
967 <tr><td>QASYMMU8
968 <tr><td>QASYMMS8
969 <tr><td>QUANTIZEDSYMM8PERAXIS
970 </table>
971<tr>
972 <td rowspan="3">DequantizeLayer
973 <td rowspan="3" style="width:200px;"> Layer to dequantize the values in a tensor.
974 <td rowspan="3">
975 <ul>
976 <li>ANEURALNETWORKS_DEQUANTIZE
977 </ul>
978 <td>CpuRef
979 <td>
980 <ul>
981 <li>All
982 </ul>
983 <td>
984 <table>
985 <tr><th>
986 <tr><td>QASYMMS8
987 <tr><td>QASYMMU8
988 <tr><td>QSYMMS8
989 <tr><td>QSYMMS16
990 </table>
991<tr>
992 <td>CpuAcc
993 <td>
994 <ul>
995 <li>All
996 </ul>
997 <td>
998 <table>
999 <tr><th>
1000 <tr><td>FLOAT16
1001 <tr><td>FLOAT32
1002 <tr><td>QASYMMU8
1003 <tr><td>QASYMMS8
1004 <tr><td>QUANTIZEDSYMM8PERAXIS
1005 <tr><td>QSYMMS8
1006 <tr><td>QSYMMS16
1007 </table>
1008<tr>
1009 <td>GpuAcc
1010 <td>
1011 <ul>
1012 <li>All
1013 </ul>
1014 <td>
1015 <table>
1016 <tr><th>
1017 <tr><td>FLOAT16
1018 <tr><td>FLOAT32
1019 <tr><td>QASYMMU8
1020 <tr><td>QASYMMS8
1021 <tr><td>QUANTIZEDSYMM8PERAXIS
1022 <tr><td>QSYMMS8
1023 <tr><td>QSYMMS16
1024 </table>
1025<tr>
1026 <td rowspan="2">DetectionPostProcessLayer
1027 <td rowspan="2" style="width:200px;"> Layer to generate the detection output based on center size encoded boxes, class prediction and anchors by doing non maximum suppression (NMS).
1028 <td rowspan="2">
1029 <ul>
1030 <li>ANEURALNETWORKS_DETECTION_POSTPROCESSING
1031 </ul>
1032 <td>CpuRef
1033 <td>
1034 <ul>
1035 <li>All
1036 </ul>
1037 <td>
1038 <table>
1039 <tr><th>
1040 <tr><td>BFLOAT16
1041 <tr><td>FLOAT16
1042 <tr><td>FLOAT32
1043 <tr><td>QASYMMS8
1044 <tr><td>QASYMMU8
1045 <tr><td>QSYMMS16
1046 </table>
1047<tr>
1048 <td>CpuAcc
1049 <td>
1050 <ul>
1051 <li>All
1052 </ul>
1053 <td>
1054 <table>
1055 <tr><th>
1056 <tr><td>QASYMMU8
1057 <tr><td>QASYMMS8
1058 <tr><td>FLOAT32
1059 </table>
1060<tr>
1061 <td rowspan="3">DivisionLayer
1062 <td rowspan="3" style="width:200px;"> Layer to divide 2 tensors.
1063 <td rowspan="3">
1064 <ul>
1065 <li>ANEURALNETWORKS_DIV
1066 </ul>
1067 <td>CpuRef
1068 <td>
1069 <ul>
1070 <li>All
1071 </ul>
1072 <td>
1073 <table>
1074 <tr><th>
1075 <tr><td>BFLOAT16
1076 <tr><td>FLOAT16
1077 <tr><td>FLOAT32
1078 <tr><td>QASYMMS8
1079 <tr><td>QASYMMU8
1080 <tr><td>QSYMMS16
1081 <tr><td>SIGNED32
1082 </table>
1083<tr>
1084 <td>CpuAcc
1085 <td>
1086 <ul>
1087 <li>All
1088 </ul>
1089 <td>
1090 <table>
1091 <tr><th>
1092 <tr><td>FLOAT16
1093 <tr><td>FLOAT32
1094 </table>
1095<tr>
1096 <td>GpuAcc
1097 <td>
1098 <ul>
1099 <li>All
1100 </ul>
1101 <td>
1102 <table>
1103 <tr><th>
1104 <tr><td>FLOAT16
1105 <tr><td>FLOAT32
1106 </table>
1107<tr>
1108 <td rowspan="3">ElementwiseBaseLayer
1109 <td rowspan="3" style="width:200px;"> Layer to perform Add - Div - Max - Min - Mul operations.
1110 <td rowspan="3">
1111 <ul>
1112 <li>ANEURALNETWORKS_ADD
1113 <li>ANEURALNETWORKS_DIV
1114 <li>ANEURALNETWORKS_MAXIMUM
1115 <li>ANEURALNETWORKS_MINIMUM
1116 <li>ANEURALNETWORKS_MUL
1117 </ul>
1118 <td>CpuRef
1119 <td>
1120 <ul>
1121 <li>All
1122 </ul>
1123 <td>
1124 <table>
1125 <tr><th>
1126 <tr><td>BFLOAT16
1127 <tr><td>FLOAT16
1128 <tr><td>FLOAT32
1129 <tr><td>QASYMMS8
1130 <tr><td>QASYMMU8
1131 <tr><td>QSYMMS16
1132 <tr><td>SIGNED32
1133 </table>
1134<tr>
1135 <td>CpuAcc
1136 <td>
1137 <ul>
1138 <li>All
1139 </ul>
1140 <td>
1141 <table>
1142 <tr><th>
1143 <tr><td>QASYMMU8
1144 <tr><td>QASYMMS8
1145 <tr><td>QSYMMS16
1146 <tr><td>SIGNED32
1147 <tr><td>FLOAT16
1148 <tr><td>FLOAT32
1149 </table>
1150<tr>
1151 <td>GpuAcc
1152 <td>
1153 <ul>
1154 <li>All
1155 </ul>
1156 <td>
1157 <table>
1158 <tr><th>
1159 <tr><td>QASYMMU8
1160 <tr><td>QASYMMS8
1161 <tr><td>QSYMMS16
1162 <tr><td>SIGNED32
1163 <tr><td>FLOAT16
1164 <tr><td>FLOAT32
1165 </table>
1166<tr>
John Mcloughlin0ec00872023-05-15 17:03:49 +01001167 <td rowspan="3">ElementwiseBinaryLayer
1168 <td rowspan="3" style="width:200px;"> Layer to perform Power and Square Difference operations.
1169 <td rowspan="3">
1170 <ul>
1171 <li>ANEURALNETWORKS_POW
1172 </ul>
1173 <td>CpuRef
1174 <td>
1175 <ul>
1176 <li>All
1177 </ul>
1178 <td>
1179 <table>
1180 <tr><th>
1181 <tr><td>FLOAT16
1182 <tr><td>FLOAT32
1183 <tr><td>QASYMMS8
1184 <tr><td>QASYMMU8
1185 <tr><td>QSYMMS16
1186 <tr><td>SIGNED32
1187 </table>
1188<tr>
1189 <td>CpuAcc
1190 <td>
1191 <ul>
1192 <li>All
1193 </ul>
1194 <td>
1195 <table>
1196 <tr><th>
1197 <tr><td>FLOAT16
1198 <tr><td>FLOAT32
1199 </table>
1200<tr>
1201 <td>GpuAcc
1202 <td>
1203 <ul>
1204 <li>All
1205 </ul>
1206 <td>
1207 <table>
1208 <tr><th>
1209 <tr><td>FLOAT16
1210 <tr><td>FLOAT32
1211 </table>
1212<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01001213 <td rowspan="3">ElementwiseUnaryLayer
Nikhil Raj930e1a22023-06-08 09:49:46 +01001214 <td rowspan="3" style="width:200px;"> Layer to perform Rsqrt - Exp - Neg - Log - Abs - Sin - Sqrt - Ceil operations.
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01001215 <td rowspan="3">
1216 <ul>
1217 <li>ANEURALNETWORKS_ABS
1218 <li>ANEURALNETWORKS_EXP
1219 <li>ANEURALNETWORKS_LOG
1220 <li>ANEURALNETWORKS_NEG
1221 <li>ANEURALNETWORKS_RSQRT
1222 <li>ANEURALNETWORKS_SIN
1223 <li>ANEURALNETWORKS_SQRT
1224 </ul>
1225 <td>CpuRef
1226 <td>
1227 <ul>
1228 <li>All
1229 </ul>
1230 <td>
1231 <table>
1232 <tr><th>
1233 <tr><td>BFLOAT16
1234 <tr><td>FLOAT16
1235 <tr><td>FLOAT32
1236 <tr><td>QASYMMS8
1237 <tr><td>QASYMMU8
1238 <tr><td>QSYMMS16
1239 </table>
1240<tr>
1241 <td>CpuAcc
1242 <td>
1243 <ul>
1244 <li>All
1245 </ul>
1246 <td>
1247 <table>
1248 <tr><th>
1249 <tr><td>FLOAT16
1250 <tr><td>FLOAT32
1251 <tr><td>SIGNED32
1252 </table>
1253<tr>
1254 <td>GpuAcc
1255 <td>
1256 <ul>
1257 <li>All
1258 </ul>
1259 <td>
1260 <table>
1261 <tr><th>
1262 <tr><td>FLOAT16
1263 <tr><td>FLOAT32
1264 </table>
1265<tr>
1266 <td rowspan="1">FakeQuantizationLayer
1267 <td rowspan="1" style="width:200px;"> Layer to quantize float values and dequantize afterwards. The current implementation does not dequantize the values.
1268 <td rowspan="1">
1269 <ul>
1270 <li>N/A
1271 </ul>
1272 <td>CpuRef
1273 <td>
1274 <ul>
1275 <li>All
1276 </ul>
1277 <td>
1278 <table>
1279 <tr><th>
1280 <tr><td>FLOAT32
1281 </table>
1282<tr>
1283 <td rowspan="3">FillLayer
1284 <td rowspan="3" style="width:200px;"> Layer to set the values of a tensor with a given value.
1285 <td rowspan="3">
1286 <ul>
1287 <li>ANEURALNETWORKS_FILL
1288 </ul>
1289 <td>CpuRef
1290 <td>
1291 <ul>
1292 <li>All
1293 </ul>
1294 <td>
1295 <table>
1296 <tr><th>
1297 <tr><td>FLOAT16
1298 <tr><td>FLOAT32
1299 <tr><td>SIGNED32
1300 </table>
1301<tr>
1302 <td>CpuAcc
1303 <td>
1304 <ul>
1305 <li>All
1306 </ul>
1307 <td>
1308 <table>
1309 <tr><th>
1310 <tr><td>All
1311 </table>
1312<tr>
1313 <td>GpuAcc
1314 <td>
1315 <ul>
1316 <li>All
1317 </ul>
1318 <td>
1319 <table>
1320 <tr><th>
1321 <tr><td>All
1322 </table>
1323<tr>
1324 <td rowspan="3">FloorLayer
1325 <td rowspan="3" style="width:200px;"> Layer to round the value to the lowest whole number.
1326 <td rowspan="3">
1327 <ul>
1328 <li>ANEURALNETWORKS_FLOOR
1329 </ul>
1330 <td>CpuRef
1331 <td>
1332 <ul>
1333 <li>All
1334 </ul>
1335 <td>
1336 <table>
1337 <tr><th>
1338 <tr><td>BFLOAT16
1339 <tr><td>FLOAT16
1340 <tr><td>FLOAT32
1341 </table>
1342<tr>
1343 <td>CpuAcc
1344 <td>
1345 <ul>
1346 <li>All
1347 </ul>
1348 <td>
1349 <table>
1350 <tr><th>
1351 <tr><td>FLOAT32
1352 <tr><td>FLOAT16
1353 </table>
1354<tr>
1355 <td>GpuAcc
1356 <td>
1357 <ul>
1358 <li>All
1359 </ul>
1360 <td>
1361 <table>
1362 <tr><th>
1363 <tr><td>FLOAT32
1364 <tr><td>FLOAT16
1365 </table>
1366<tr>
1367 <td rowspan="3">FullyConnectedLayer
1368 <td rowspan="3" style="width:200px;"> Layer to perform a fully connected / dense operation.
1369 <td rowspan="3">
1370 <ul>
1371 <li>ANEURALNETWORKS_FULLY_CONNECTED
1372 </ul>
1373 <td>CpuRef
1374 <td>
1375 <ul>
1376 <li>All
1377 </ul>
1378 <td>
1379 <table>
1380 <tr><th>
1381 <tr><td>BFLOAT16
1382 <tr><td>FLOAT16
1383 <tr><td>FLOAT32
1384 <tr><td>QASYMMS8
1385 <tr><td>QASYMMU8
1386 <tr><td>QSYMMS16
1387 </table>
1388<tr>
1389 <td>CpuAcc
1390 <td>
1391 <ul>
1392 <li>NHWC
1393 <li>NCHW
1394 </ul>
1395 <td>
1396 <table>
1397 <tr><th>
1398 <tr><td>SIGNED32
1399 <tr><td>FLOAT16
1400 <tr><td>FLOAT32
1401 <tr><td>QASYMMU8
1402 <tr><td>QASYMMS8
1403 </table>
1404<tr>
1405 <td>GpuAcc
1406 <td>
1407 <ul>
1408 <li>NHWC
1409 <li>NCHW
1410 </ul>
1411 <td>
1412 <table>
1413 <tr><th>
1414 <tr><td>SIGNED32
1415 <tr><td>FLOAT16
1416 <tr><td>FLOAT32
1417 <tr><td>QASYMMU8
1418 <tr><td>QASYMMS8
1419 </table>
1420<tr>
1421 <td rowspan="3">GatherLayer
1422 <td rowspan="3" style="width:200px;"> Layer to perform the gather operation along the chosen axis.
1423 <td rowspan="3">
1424 <ul>
1425 <li>ANEURALNETWORKS_GATHER
1426 </ul>
1427 <td>CpuRef
1428 <td>
1429 <ul>
1430 <li>All
1431 </ul>
1432 <td>
1433 <table>
1434 <tr><th>
1435 <tr><td>BFLOAT16
1436 <tr><td>FLOAT16
1437 <tr><td>FLOAT32
1438 <tr><td>QASYMMS8
1439 <tr><td>QASYMMU8
1440 <tr><td>QSYMMS16
1441 <tr><td>SIGNED32
1442 </table>
1443<tr>
1444 <td>CpuAcc
1445 <td>
1446 <ul>
1447 <li>All
1448 </ul>
1449 <td>
1450 <table>
1451 <tr><th>
1452 <tr><td>All
1453 </table>
1454<tr>
1455 <td>GpuAcc
1456 <td>
1457 <ul>
1458 <li>All
1459 </ul>
1460 <td>
1461 <table>
1462 <tr><th>
1463 <tr><td>All
1464 </table>
1465<tr>
Teresa Charlinb2d3ec52022-04-12 22:07:09 +01001466 <td rowspan="3">GatherNdLayer
1467 <td rowspan="3" style="width:200px;"> Layer to perform the gatherNd operation.
1468 <td rowspan="3">
1469 <ul>
1470 <li>N/A
1471 </ul>
1472 <td>CpuRef
1473 <td>
1474 <ul>
1475 <li>All
1476 </ul>
1477 <td>
1478 <table>
1479 <tr><th>
1480 <tr><td>BFLOAT16
1481 <tr><td>FLOAT16
1482 <tr><td>FLOAT32
1483 <tr><td>QASYMMS8
1484 <tr><td>QASYMMU8
1485 <tr><td>QSYMMS16
1486 <tr><td>SIGNED32
1487 </table>
1488<tr>
1489 <td>CpuAcc
1490 <td>
1491 <ul>
Teresa Charlinbd22c7d2022-04-26 18:14:12 +01001492 <li>All
Teresa Charlinb2d3ec52022-04-12 22:07:09 +01001493 </ul>
1494 <td>
Teresa Charlinbd22c7d2022-04-26 18:14:12 +01001495 <table>
1496 <tr><th>
1497 <tr><td>BFLOAT16
1498 <tr><td>FLOAT16
1499 <tr><td>FLOAT32
1500 <tr><td>QASYMMS8
1501 <tr><td>QASYMMU8
1502 <tr><td>QSYMMS16
1503 <tr><td>SIGNED32
1504 </table>
Teresa Charlinb2d3ec52022-04-12 22:07:09 +01001505<tr>
1506 <td>GpuAcc
1507 <td>
1508 <ul>
Teresa Charlin989e2f62022-04-27 16:26:11 +01001509 <li>All
Teresa Charlinb2d3ec52022-04-12 22:07:09 +01001510 </ul>
1511 <td>
Teresa Charlin989e2f62022-04-27 16:26:11 +01001512 <table>
1513 <tr><th>
1514 <tr><td>BFLOAT16
1515 <tr><td>FLOAT16
1516 <tr><td>FLOAT32
1517 <tr><td>QASYMMS8
1518 <tr><td>QASYMMU8
1519 <tr><td>QSYMMS16
1520 <tr><td>SIGNED32
1521 </table>
Teresa Charlinb2d3ec52022-04-12 22:07:09 +01001522<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01001523 <td rowspan="1">InputLayer
1524 <td rowspan="1" style="width:200px;"> Special layer used to provide input data to the computational network.
1525 <td rowspan="1">
1526 <ul>
1527 <li>N/A
1528 </ul>
1529 <td>All
1530 <td>
1531 <ul>
1532 <li>All
1533 </ul>
1534 <td>
1535 <table>
1536 <tr><th>
1537 <tr><td>All
1538 </table>
1539<tr>
1540 <td rowspan="3">InstanceNormalizationLayer
1541 <td rowspan="3" style="width:200px;"> Layer to perform an instance normalization on a given axis.
1542 <td rowspan="3">
1543 <ul>
1544 <li>ANEURALNETWORKS_INSTANCE_NORMALIZATION
1545 </ul>
1546 <td>CpuRef
1547 <td>
1548 <ul>
1549 <li>All
1550 </ul>
1551 <td>
1552 <table>
1553 <tr><th>
1554 <tr><td>BFLOAT16
1555 <tr><td>FLOAT16
1556 <tr><td>FLOAT32
1557 </table>
1558<tr>
1559 <td>CpuAcc
1560 <td>
1561 <ul>
1562 <li>NHWC
1563 <li>NCHW
1564 </ul>
1565 <td>
1566 <table>
1567 <tr><th>
1568 <tr><td>FLOAT16
1569 <tr><td>FLOAT32
1570 </table>
1571<tr>
1572 <td>GpuAcc
1573 <td>
1574 <ul>
1575 <li>NHWC
1576 <li>NCHW
1577 </ul>
1578 <td>
1579 <table>
1580 <tr><th>
1581 <tr><td>FLOAT16
1582 <tr><td>FLOAT32
1583 </table>
1584<tr>
1585 <td rowspan="3">L2NormalizationLayer
1586 <td rowspan="3" style="width:200px;"> Layer to perform an L2 normalization on a given axis.
1587 <td rowspan="3">
1588 <ul>
1589 <li>ANEURALNETWORKS_L2_NORMALIZATION
1590 </ul>
1591 <td>CpuRef
1592 <td>
1593 <ul>
1594 <li>All
1595 </ul>
1596 <td>
1597 <table>
1598 <tr><th>
1599 <tr><td>BFLOAT16
1600 <tr><td>FLOAT16
1601 <tr><td>FLOAT32
1602 <tr><td>QASYMMS8
1603 <tr><td>QASYMMU8
1604 <tr><td>QSYMMS16
1605 </table>
1606<tr>
1607 <td>CpuAcc
1608 <td>
1609 <ul>
1610 <li>NHWC
1611 <li>NCHW
1612 </ul>
1613 <td>
1614 <table>
1615 <tr><th>
1616 <tr><td>FLOAT16
1617 <tr><td>FLOAT32
1618 </table>
1619<tr>
1620 <td>GpuAcc
1621 <td>
1622 <ul>
1623 <li>NHWC
1624 <li>NCHW
1625 </ul>
1626 <td>
1627 <table>
1628 <tr><th>
1629 <tr><td>FLOAT16
1630 <tr><td>FLOAT32
1631 </table>
1632<tr>
1633 <td rowspan="3">LogSoftmaxLayer
1634 <td rowspan="3" style="width:200px;"> Layer to perform the log softmax activations given logits.
1635 <td rowspan="3">
1636 <ul>
1637 <li>N/A
1638 </ul>
1639 <td>CpuRef
1640 <td>
1641 <ul>
1642 <li>All
1643 </ul>
1644 <td>
1645 <table>
1646 <tr><th>
1647 <tr><td>BFLOAT16
1648 <tr><td>FLOAT16
1649 <tr><td>FLOAT32
1650 </table>
1651<tr>
1652 <td>CpuAcc
1653 <td>
1654 <ul>
1655 <li>All
1656 </ul>
1657 <td>
1658 <table>
1659 <tr><th>
1660 <tr><td>QASYMMU8
1661 <tr><td>QASYMMS8
1662 <tr><td>FLOAT16
1663 <tr><td>FLOAT32
1664 </table>
1665<tr>
1666 <td>GpuAcc
1667 <td>
1668 <ul>
1669 <li>All
1670 </ul>
1671 <td>
1672 <table>
1673 <tr><th>
1674 <tr><td>QASYMMU8
1675 <tr><td>QASYMMS8
1676 <tr><td>FLOAT16
1677 <tr><td>FLOAT32
1678 </table>
1679<tr>
1680 <td rowspan="3">LogicalBinaryLayer
1681 <td rowspan="3" style="width:200px;"> Layer to perform Logical AND - Logical NOT - Logical OR operations.
1682 <td rowspan="3">
1683 <ul>
1684 <li>ANEURALNETWORKS_LOGICAL_AND
1685 <li>ANEURALNETWORKS_LOGICAL_NOT
1686 <li>ANEURALNETWORKS_LOGICAL_OR
1687 </ul>
1688 <td>CpuRef
1689 <td>
1690 <ul>
1691 <li>All
1692 </ul>
1693 <td>
1694 <table>
1695 <tr><th>
1696 <tr><td>BOOLEAN
1697 </table>
1698<tr>
1699 <td>CpuAcc
1700 <td>
1701 <ul>
1702 <li>All
1703 </ul>
1704 <td>
1705 <table>
1706 <tr><th>
1707 <tr><td>BOOLEAN
1708 </table>
1709<tr>
1710 <td>GpuAcc
1711 <td>
1712 <ul>
1713 <li>All
1714 </ul>
1715 <td>
1716 <table>
1717 <tr><th>
1718 <tr><td>BOOLEAN
1719 </table>
1720<tr>
1721 <td rowspan="3">LstmLayer
1722 <td rowspan="3" style="width:200px;"> Layer to perform a single time step in a Long Short-Term Memory (LSTM) operation.
1723 <td rowspan="3">
1724 <ul>
1725 <li>ANEURALNETWORKS_LSTM
1726 </ul>
1727 <td>CpuRef
1728 <td>
1729 <ul>
1730 <li>All
1731 </ul>
1732 <td>
1733 <table>
1734 <tr><th>
1735 <tr><td>BFLOAT16
1736 <tr><td>FLOAT16
1737 <tr><td>QSYMMS16
1738 </table>
1739<tr>
1740 <td>CpuAcc
1741 <td>
1742 <ul>
1743 <li>All
1744 </ul>
1745 <td>
1746 <table>
1747 <tr><th>
1748 <tr><td>FLOAT16
1749 <tr><td>FLOAT32
1750 </table>
1751<tr>
1752 <td>GpuAcc
1753 <td>
1754 <ul>
1755 <li>All
1756 </ul>
1757 <td>
1758 <table>
1759 <tr><th>
1760 <tr><td>FLOAT16
1761 <tr><td>FLOAT32
1762 </table>
1763<tr>
1764 <td rowspan="3">MapLayer
1765 <td rowspan="3" style="width:200px;"> Layer to perform map operation on tensor.
1766 <td rowspan="3">
1767 <ul>
1768 <li>N/A
1769 </ul>
1770 <td>CpuRef
1771 <td>
1772 <ul>
1773 <li>All
1774 </ul>
1775 <td>
1776 <table>
1777 <tr><th>
1778 <tr><td>All
1779 </table>
1780<tr>
1781 <td>CpuAcc
1782 <td>
1783 <ul>
1784 <li>All
1785 </ul>
1786 <td>
1787 <table>
1788 <tr><th>
1789 <tr><td>All
1790 </table>
1791<tr>
1792 <td>GpuAcc
1793 <td>
1794 <ul>
1795 <li>All
1796 </ul>
1797 <td>
1798 <table>
1799 <tr><th>
1800 <tr><td>All
1801 </table>
1802<tr>
1803 <td rowspan="3">MaximumLayer
1804 <td rowspan="3" style="width:200px;"> Layer to perform an elementwise maximum of two tensors.
1805 <td rowspan="3">
1806 <ul>
1807 <li>N/A
1808 </ul>
1809 <td>CpuRef
1810 <td>
1811 <ul>
1812 <li>All
1813 </ul>
1814 <td>
1815 <table>
1816 <tr><th>
1817 <tr><td>BFLOAT16
1818 <tr><td>FLOAT16
1819 <tr><td>FLOAT32
1820 <tr><td>QASYMMS8
1821 <tr><td>QASYMMU8
1822 <tr><td>QSYMMS16
1823 <tr><td>SIGNED32
1824 </table>
1825<tr>
1826 <td>CpuAcc
1827 <td>
1828 <ul>
1829 <li>All
1830 </ul>
1831 <td>
1832 <table>
1833 <tr><th>
1834 <tr><td>QASYMMU8
1835 <tr><td>QASYMMS8
1836 <tr><td>FLOAT16
1837 <tr><td>FLOAT32
1838 <tr><td>SIGNED32
1839 </table>
1840<tr>
1841 <td>GpuAcc
1842 <td>
1843 <ul>
1844 <li>All
1845 </ul>
1846 <td>
1847 <table>
1848 <tr><th>
1849 <tr><td>QASYMMU8
1850 <tr><td>QASYMMS8
1851 <tr><td>QSYMMS16
1852 <tr><td>FLOAT16
1853 <tr><td>FLOAT32
1854 <tr><td>SIGNED32
1855 </table>
1856<tr>
1857 <td rowspan="3">MeanLayer
1858 <td rowspan="3" style="width:200px;"> Layer to perform reduce mean operation.
1859 <td rowspan="3">
1860 <ul>
1861 <li>ANEURALNETWORKS_MEAN
1862 </ul>
1863 <td>CpuRef
1864 <td>
1865 <ul>
1866 <li>All
1867 </ul>
1868 <td>
1869 <table>
1870 <tr><th>
1871 <tr><td>BFLOAT16
1872 <tr><td>FLOAT16
1873 <tr><td>FLOAT32
1874 <tr><td>QASYMMS8
1875 <tr><td>QASYMMU8
1876 <tr><td>QSYMMS16
1877 </table>
1878<tr>
1879 <td>CpuAcc
1880 <td>
1881 <ul>
1882 <li>All
1883 </ul>
1884 <td>
1885 <table>
1886 <tr><th>
1887 <tr><td>QASYMMU8
1888 <tr><td>QASYMMS8
1889 <tr><td>FLOAT16
1890 <tr><td>FLOAT32
1891 </table>
1892<tr>
1893 <td>GpuAcc
1894 <td>
1895 <ul>
1896 <li>All
1897 </ul>
1898 <td>
1899 <table>
1900 <tr><th>
1901 <tr><td>QASYMMU8
1902 <tr><td>QASYMMS8
1903 <tr><td>FLOAT16
1904 <tr><td>FLOAT32
1905 </table>
1906<tr>
1907 <td rowspan="3">MemCopyLayer
1908 <td rowspan="3" style="width:200px;"> Layer to perform memory copy operation.
1909 <td rowspan="3">
1910 <ul>
1911 <li>N/A
1912 </ul>
1913 <td>CpuRef
1914 <td>
1915 <ul>
1916 <li>All
1917 </ul>
1918 <td>
1919 <table>
1920 <tr><th>
1921 <tr><td>BFLOAT16
1922 <tr><td>FLOAT16
1923 <tr><td>FLOAT32
1924 <tr><td>QASYMMS8
1925 <tr><td>QASYMMU8
1926 <tr><td>QSYMMS16
1927 <tr><td>BOOLEAN
1928 </table>
1929<tr>
1930 <td>CpuAcc
1931 <td>
1932 <ul>
1933 <li>All
1934 </ul>
1935 <td>
1936 <table>
1937 <tr><th>
1938 <tr><td>All
1939 </table>
1940<tr>
1941 <td>GpuAcc
1942 <td>
1943 <ul>
1944 <li>All
1945 </ul>
1946 <td>
1947 <table>
1948 <tr><th>
1949 <tr><td>All
1950 </table>
1951<tr>
1952 <td rowspan="3">MemImportLayer
1953 <td rowspan="3" style="width:200px;"> Layer to perform memory import operation.
1954 <td rowspan="3">
1955 <ul>
1956 <li>N/A
1957 </ul>
1958 <td>CpuRef
1959 <td>
1960 <ul>
1961 <li>All
1962 </ul>
1963 <td>
1964 <table>
1965 <tr><th>
1966 <tr><td>All
1967 </table>
1968<tr>
1969 <td>CpuAcc
1970 <td>
1971 <ul>
1972 <li>All
1973 </ul>
1974 <td>
1975 <table>
1976 <tr><th>
1977 <tr><td>All
1978 </table>
1979<tr>
1980 <td>GpuAcc
1981 <td>
1982 <ul>
1983 <li>All
1984 </ul>
1985 <td>
1986 <table>
1987 <tr><th>
1988 <tr><td>All
1989 </table>
1990<tr>
1991 <td rowspan="3">MergeLayer
1992 <td rowspan="3" style="width:200px;"> Layer to concatenate tensors along a given axis.
1993 <td rowspan="3">
1994 <ul>
1995 <li>ANEURALNETWORKS_CONCATENATION
1996 </ul>
1997 <td>CpuRef
1998 <td>
1999 <ul>
2000 <li>All
2001 </ul>
2002 <td>
2003 <table>
2004 <tr><th>
2005 <tr><td>BFLOAT16
2006 <tr><td>FLOAT16
2007 <tr><td>FLOAT32
2008 <tr><td>QASYMMS8
2009 <tr><td>QASYMMU8
2010 <tr><td>QSYMMS16
2011 </table>
2012<tr>
2013 <td>CpuAcc
2014 <td>
2015 <ul>
2016 <li>All
2017 </ul>
2018 <td>
2019 <table>
2020 <tr><th>
2021 <tr><td>QASYMMU8
2022 <tr><td>QASYMMS8
2023 <tr><td>FLOAT16
2024 <tr><td>FLOAT32
2025 </table>
2026<tr>
2027 <td>GpuAcc
2028 <td>
2029 <ul>
2030 <li>All
2031 </ul>
2032 <td>
2033 <table>
2034 <tr><th>
2035 <tr><td>QASYMMU8
2036 <tr><td>QASYMMS8
2037 <tr><td>FLOAT16
2038 <tr><td>FLOAT32
2039 </table>
2040<tr>
2041 <td rowspan="3">MinimumLayer
2042 <td rowspan="3" style="width:200px;"> Layer to perform an elementwise minimum of two tensors.
2043 <td rowspan="3">
2044 <ul>
2045 <li>ANEURALNETWORKS_MINIMUM
2046 </ul>
2047 <td>CpuRef
2048 <td>
2049 <ul>
2050 <li>All
2051 </ul>
2052 <td>
2053 <table>
2054 <tr><th>
2055 <tr><td>BFLOAT16
2056 <tr><td>FLOAT16
2057 <tr><td>FLOAT32
2058 <tr><td>QASYMMS8
2059 <tr><td>QASYMMU8
2060 <tr><td>QSYMMS16
2061 <tr><td>SIGNED32
2062 </table>
2063<tr>
2064 <td>CpuAcc
2065 <td>
2066 <ul>
2067 <li>All
2068 </ul>
2069 <td>
2070 <table>
2071 <tr><th>
2072 <tr><td>QASYMMU8
2073 <tr><td>QASYMMS8
2074 <tr><td>QSYMMS16
2075 <tr><td>FLOAT16
2076 <tr><td>FLOAT32
2077 </table>
2078<tr>
2079 <td>GpuAcc
2080 <td>
2081 <ul>
2082 <li>All
2083 </ul>
2084 <td>
2085 <table>
2086 <tr><th>
2087 <tr><td>QASYMMU8
2088 <tr><td>QASYMMS8
2089 <tr><td>QSYMMS16
2090 <tr><td>FLOAT16
2091 <tr><td>FLOAT32
2092 <tr><td>SIGNED32
2093 </table>
2094<tr>
2095 <td rowspan="3">MultiplicationLayer
2096 <td rowspan="3" style="width:200px;"> Layer to perform an elementwise multiplication of two tensors.
2097 <td rowspan="3">
2098 <ul>
2099 <li>ANEURALNETWORKS_MUL
2100 </ul>
2101 <td>CpuRef
2102 <td>
2103 <ul>
2104 <li>All
2105 </ul>
2106 <td>
2107 <table>
2108 <tr><th>
2109 <tr><td>BFLOAT16
2110 <tr><td>FLOAT16
2111 <tr><td>FLOAT32
2112 <tr><td>QASYMMS8
2113 <tr><td>QASYMMU8
2114 <tr><td>QSYMMS16
2115 <tr><td>SIGNED32
2116 </table>
2117<tr>
2118 <td>CpuAcc
2119 <td>
2120 <ul>
2121 <li>All
2122 </ul>
2123 <td>
2124 <table>
2125 <tr><th>
2126 <tr><td>QASYMMU8
2127 <tr><td>QASYMMS8
2128 <tr><td>QSYMMS16
2129 <tr><td>SIGNED32
2130 <tr><td>FLOAT16
2131 <tr><td>FLOAT32
2132 </table>
2133<tr>
2134 <td>GpuAcc
2135 <td>
2136 <ul>
2137 <li>All
2138 </ul>
2139 <td>
2140 <table>
2141 <tr><th>
2142 <tr><td>QASYMMU8
2143 <tr><td>QASYMMS8
2144 <tr><td>QSYMMS16
2145 <tr><td>SIGNED32
2146 <tr><td>FLOAT16
2147 <tr><td>FLOAT32
2148 <tr><td>SIGNED32
2149 </table>
2150<tr>
2151 <td rowspan="3">NormalizationLayer
2152 <td rowspan="3" style="width:200px;"> Layer to compute normalization operation.
2153 <td rowspan="3">
2154 <ul>
2155 <li>ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION
2156 </ul>
2157 <td>CpuRef
2158 <td>
2159 <ul>
2160 <li>All
2161 </ul>
2162 <td>
2163 <table>
2164 <tr><th>
2165 <tr><td>BFLOAT16
2166 <tr><td>FLOAT16
2167 <tr><td>FLOAT32
2168 <tr><td>QASYMMS8
2169 <tr><td>QASYMMU8
2170 <tr><td>QSYMMS16
2171 </table>
2172<tr>
2173 <td>CpuAcc
2174 <td>
2175 <ul>
2176 <li>NHWC
2177 <li>NCHW
2178 </ul>
2179 <td>
2180 <table>
2181 <tr><th>
2182 <tr><td>FLOAT32
2183 <tr><td>FLOAT16
2184 </table>
2185<tr>
2186 <td>GpuAcc
2187 <td>
2188 <ul>
2189 <li>NHWC
2190 <li>NCHW
2191 </ul>
2192 <td>
2193 <table>
2194 <tr><th>
2195 <tr><td>FLOAT32
2196 <tr><td>FLOAT16
2197 </table>
2198<tr>
2199 <td rowspan="1">OutputLayer
2200 <td rowspan="1" style="width:200px;"> A special layer providing access to a user supplied buffer into which the output of a network can be written.
2201 <td rowspan="1">
2202 <ul>
2203 <li>N/A
2204 </ul>
2205 <td>All
2206 <td>
2207 <ul>
2208 <li>All
2209 </ul>
2210 <td>
2211 <table>
2212 <tr><th>
2213 <tr><td>All
2214 </table>
2215<tr>
2216 <td rowspan="3">PadLayer
2217 <td rowspan="3" style="width:200px;"> Layer to pad a tensor.
2218 <td rowspan="3">
2219 <ul>
2220 <li>ANEURALNETWORKS_PAD
2221 <li>ANEURALNETWORKS_PAD_V2
2222 </ul>
2223 <td>CpuRef
2224 <td>
2225 <ul>
2226 <li>All
2227 </ul>
2228 <td>
2229 <table>
2230 <tr><th>
2231 <tr><td>BFLOAT16
2232 <tr><td>FLOAT16
2233 <tr><td>FLOAT32
2234 <tr><td>QASYMMS8
2235 <tr><td>QASYMMU8
2236 <tr><td>QSYMMS16
2237 </table>
2238<tr>
2239 <td>CpuAcc
2240 <td>
2241 <ul>
2242 <li>NHWC
2243 <li>NCHW
2244 </ul>
2245 <td>
2246 <table>
2247 <tr><th>
2248 <tr><td>All
2249 </table>
2250<tr>
2251 <td>GpuAcc
2252 <td>
2253 <ul>
2254 <li>NHWC
2255 <li>NCHW
2256 </ul>
2257 <td>
2258 <table>
2259 <tr><th>
2260 <tr><td>All
2261 </table>
2262<tr>
2263 <td rowspan="3">PermuteLayer
2264 <td rowspan="3" style="width:200px;"> Layer to transpose an ND tensor.
2265 <td rowspan="3">
2266 <ul>
2267 <li>ANEURALNETWORKS_TRANSPOSE
2268 </ul>
2269 <td>CpuRef
2270 <td>
2271 <ul>
2272 <li>All
2273 </ul>
2274 <td>
2275 <table>
2276 <tr><th>
2277 <tr><td>BFLOAT16
2278 <tr><td>FLOAT16
2279 <tr><td>FLOAT32
2280 <tr><td>QASYMMS8
2281 <tr><td>QASYMMU8
2282 <tr><td>QSYMMS16
2283 </table>
2284<tr>
2285 <td>CpuAcc
2286 <td>
2287 <ul>
2288 <li>NHWC
2289 <li>NCHW
2290 </ul>
2291 <td>
2292 <table>
2293 <tr><th>
2294 <tr><td>All
2295 </table>
2296<tr>
2297 <td>GpuAcc
2298 <td>
2299 <ul>
2300 <li>NHWC
2301 <li>NCHW
2302 </ul>
2303 <td>
2304 <table>
2305 <tr><th>
2306 <tr><td>All
2307 </table>
2308<tr>
2309 <td rowspan="3">Pooling2dLayer
Tamás Nyíri7b885b32021-10-26 14:47:57 +01002310 <td rowspan="3" style="width:200px;"> Layer to perform 2D pooling with the specified pooling operation.
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01002311 <td rowspan="3">
2312 <ul>
2313 <li>ANEURALNETWORKS_AVERAGE_POOL_2D
2314 <li>ANEURALNETWORKS_L2_POOL_2D
2315 <li>ANEURALNETWORKS_MAX_POOL_2D
2316 </ul>
2317 <td>CpuRef
2318 <td>
2319 <ul>
2320 <li>All
2321 </ul>
2322 <td>
2323 <table>
2324 <tr><th>
2325 <tr><td>BFLOAT16
2326 <tr><td>FLOAT16
2327 <tr><td>FLOAT32
2328 <tr><td>QASYMMS8
2329 <tr><td>QASYMMU8
2330 <tr><td>QSYMMS16
2331 </table>
2332<tr>
2333 <td>CpuAcc
2334 <td>
2335 <ul>
2336 <li>NHWC
2337 <li>NCHW
2338 </ul>
2339 <td>
2340 <table>
2341 <tr><th>
2342 <tr><td>QASYMMU8
2343 <tr><td>QASYMMS8
2344 <tr><td>FLOAT16
2345 <tr><td>FLOAT32
2346 </table>
2347<tr>
2348 <td>GpuAcc
2349 <td>
2350 <ul>
2351 <li>NHWC
2352 <li>NCHW
2353 </ul>
2354 <td>
2355 <table>
2356 <tr><th>
2357 <tr><td>QASYMMU8
2358 <tr><td>QASYMMS8
2359 <tr><td>FLOAT16
2360 <tr><td>FLOAT32
2361 </table>
2362<tr>
Tamás Nyíri7b885b32021-10-26 14:47:57 +01002363 <td rowspan="3">Pooling3dLayer
2364 <td rowspan="3" style="width:200px;"> Layer to perform 3D pooling with the specified pooling operation.
2365 <td rowspan="3">
2366 <ul>
2367 <li>ANEURALNETWORKS_AVERAGE_POOL_3D
2368 <li>ANEURALNETWORKS_L2_POOL_3D
2369 <li>ANEURALNETWORKS_MAX_POOL_3D
2370 </ul>
2371 <td>CpuRef
2372 <td>
2373 <ul>
2374 <li>NDHWC
2375 </ul>
2376 <td>
2377 <table>
2378 <tr><th>
2379 <tr><td>BFLOAT16
2380 <tr><td>FLOAT16
2381 <tr><td>FLOAT32
2382 <tr><td>QASYMMS8
2383 <tr><td>QASYMMU8
2384 <tr><td>QSYMMS16
2385 </table>
2386<tr>
2387 <td>CpuAcc
2388 <td>
2389 <ul>
2390 <li>NA
2391 </ul>
2392 <td>
2393<tr>
2394 <td>GpuAcc
2395 <td>
2396 <ul>
2397 <li>NDHWC
2398 </ul>
Nikhil Raj930e1a22023-06-08 09:49:46 +01002399 <td>
Tamás Nyíri7b885b32021-10-26 14:47:57 +01002400<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01002401 <td rowspan="1">PreCompiledLayer
2402 <td rowspan="1" style="width:200px;"> Opaque layer provided by a backend which provides an executable representation of a subgraph from the original network.
2403 <td rowspan="1">
2404 <ul>
2405 <li>N/A
2406 </ul>
2407 <td>N/A
2408 <td>N/A
2409 <td>N/A
2410<tr>
2411 <td rowspan="3">PreluLayer
2412 <td rowspan="3" style="width:200px;"> Layer to compute the activation layer with the PRELU activation function.
2413 <td rowspan="3">
2414 <ul>
2415 <li>ANEURALNETWORKS_PRELU
2416 </ul>
2417 <td>CpuRef
2418 <td>
2419 <ul>
2420 <li>All
2421 </ul>
2422 <td>
2423 <table>
2424 <tr><th>
2425 <tr><td>BFLOAT16
2426 <tr><td>FLOAT16
2427 <tr><td>FLOAT32
2428 <tr><td>QASYMMS8
2429 <tr><td>QASYMMU8
2430 <tr><td>QSYMMS16
2431 </table>
2432<tr>
2433 <td>CpuAcc
2434 <td>
2435 <ul>
2436 <li>All
2437 </ul>
2438 <td>
2439 <table>
2440 <tr><th>
2441 <tr><td>QASYMMU8
2442 <tr><td>QASYMMS8
2443 <tr><td>FLOAT16
2444 <tr><td>FLOAT32
2445 </table>
2446<tr>
2447 <td>GpuAcc
2448 <td>
2449 <ul>
2450 <li>All
2451 </ul>
2452 <td>
2453 <table>
2454 <tr><th>
2455 <tr><td>QASYMMU8
2456 <tr><td>QASYMMS8
2457 <tr><td>FLOAT16
2458 <tr><td>FLOAT32
2459 </table>
2460<tr>
2461 <td rowspan="3">QLstmLayer
2462 <td rowspan="3" style="width:200px;"> Layer to perform quantized LSTM (Long Short-Term Memory) operation.
2463 <td rowspan="3">
2464 <ul>
2465 <li>ANEURALNETWORKS_QUANTIZED_LSTM
2466 <li>ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
2467 </ul>
2468 <td>CpuRef
2469 <td>
2470 <ul>
2471 <li>All
2472 </ul>
2473 <td>
2474 <table>
2475 <tr><th>
2476 <tr><td>All
2477 </table>
2478<tr>
2479 <td>CpuAcc
2480 <td>
2481 <ul>
2482 <li>All
2483 </ul>
2484 <td>
2485 <table>
2486 <tr><th>
2487 <tr><td>QASYMMS8
2488 <tr><td>QASYMMU8
2489 <tr><td>SIGNED32
2490 <tr><td>QSYMMS16
2491 </table>
2492<tr>
2493 <td>GpuAcc
2494 <td>
2495 <ul>
2496 <li>All
2497 </ul>
2498 <td>
2499 <table>
2500 <tr><th>
2501 <tr><td>QASYMMS8
2502 <tr><td>QASYMMU8
2503 <tr><td>SIGNED32
2504 <tr><td>QSYMMS16
2505 </table>
2506<tr>
2507 <td rowspan="3">QuantizeLayer
2508 <td rowspan="3" style="width:200px;"> Layer to perform quantization operation.
2509 <td rowspan="3">
2510 <ul>
2511 <li>ANEURALNETWORKS_QUANTIZE
2512 </ul>
2513 <td>CpuRef
2514 <td>
2515 <ul>
2516 <li>All
2517 </ul>
2518 <td>
2519 <table>
2520 <tr><th>
2521 <tr><td>BFLOAT16
2522 <tr><td>FLOAT16
2523 <tr><td>FLOAT32
2524 <tr><td>QASYMMS8
2525 <tr><td>QASYMMU8
2526 <tr><td>QSYMMS8
2527 <tr><td>QSYMMS16
2528 </table>
2529<tr>
2530 <td>CpuAcc
2531 <td>
2532 <ul>
2533 <li>All
2534 </ul>
2535 <td>
2536 <table>
2537 <tr><th>
2538 <tr><td>QASYMMU8
2539 <tr><td>QASYMMS8
2540 <tr><td>QASYMM16
2541 <tr><td>FLOAT16
2542 <tr><td>FLOAT32
2543 </table>
2544<tr>
2545 <td>GpuAcc
2546 <td>
2547 <ul>
2548 <li>All
2549 </ul>
2550 <td>
2551 <table>
2552 <tr><th>
2553 <tr><td>QASYMMU8
2554 <tr><td>QASYMMS8
2555 <tr><td>QASYMM16
2556 <tr><td>FLOAT16
2557 <tr><td>FLOAT32
2558 </table>
2559<tr>
2560 <td rowspan="3">QuantizedLstmLayer
2561 <td rowspan="3" style="width:200px;"> Layer to perform quantized LSTM (Long Short-Term Memory) operation.
2562 <td rowspan="3">
2563 <ul>
2564 <li>ANEURALNETWORKS_QUANTIZED_LSTM
2565 <li>ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
2566 </ul>
2567 <td>CpuRef
2568 <td>
2569 <ul>
2570 <li>All
2571 </ul>
2572 <td>
2573 <table>
2574 <tr><th>
2575 <tr><td>All
2576 </table>
2577<tr>
2578 <td>CpuAcc
2579 <td>
2580 <ul>
2581 <li>All
2582 </ul>
2583 <td>
2584 <table>
2585 <tr><th>
2586 <tr><td>SIGNED32
2587 <tr><td>QASYMMU8
2588 <tr><td>QSYMMS16
2589 </table>
2590<tr>
2591 <td>GpuAcc
2592 <td>
2593 <ul>
2594 <li>All
2595 </ul>
2596 <td>
2597 <table>
2598 <tr><th>
2599 <tr><td>SIGNED32
2600 <tr><td>QASYMMU8
2601 <tr><td>QSYMMS16
2602 </table>
2603<tr>
2604 <td rowspan="3">RankLayer
2605 <td rowspan="3" style="width:200px;"> Layer to perform a rank operation.
2606 <td rowspan="3">
2607 <ul>
2608 <li>ANEURALNETWORKS_RANK
2609 </ul>
2610 <td>CpuRef
2611 <td>
2612 <ul>
2613 <li>All
2614 </ul>
2615 <td>
2616 <table>
2617 <tr><th>
2618 <tr><td>All
2619 </table>
2620<tr>
2621 <td>CpuAcc
2622 <td>
2623 <ul>
2624 <li>All
2625 </ul>
2626 <td>
2627 <table>
2628 <tr><th>
2629 <tr><td>All
2630 </table>
2631<tr>
2632 <td>GpuAcc
2633 <td>
2634 <ul>
2635 <li>All
2636 </ul>
2637 <td>
2638 <table>
2639 <tr><th>
2640 <tr><td>All
2641 </table>
2642<tr>
2643 <td rowspan="3">ReduceLayer
2644 <td rowspan="3" style="width:200px;"> Layer to perform reduce with the following operations - ARG_IDX_MAX: Index of the max value - ARG_IDX_MIN: Index of the min value - MEAN_SUM: Mean of sum - PROD: Product - SUM_SQUARE: Sum of squares - SUM: Sum - MIN: Min - MAX: Max
2645 <td rowspan="3">
2646 <ul>
2647 <li>ANEURALNETWORKS_REDUCE_MAX
2648 <li>ANEURALNETWORKS_REDUCE_MIN
2649 <li>ANEURALNETWORKS_REDUCE_SUM
Teresa Charlin32b78702021-09-03 11:25:54 +01002650 <li>ANEURALNETWORKS_REDUCE_PROD
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01002651 </ul>
2652 <td>CpuRef
2653 <td>
2654 <ul>
2655 <li>All
2656 </ul>
2657 <td>
2658 <table>
2659 <tr><th>
2660 <tr><td>BFLOAT16
2661 <tr><td>FLOAT16
2662 <tr><td>FLOAT32
2663 <tr><td>QASYMMS8
2664 <tr><td>QASYMMU8
2665 <tr><td>QSYMMS16
2666 <tr><td>SIGNED32
2667 </table>
2668<tr>
2669 <td>CpuAcc
2670 <td>
2671 <ul>
2672 <li>All
2673 </ul>
2674 <td>
2675 <table>
2676 <tr><th>
2677 <tr><td>QASYMMU8
2678 <tr><td>QASYMMS8
2679 <tr><td>FLOAT16
2680 <tr><td>FLOAT32
2681 <tr><td>SIGNED32
2682 </table>
2683<tr>
2684 <td>GpuAcc
2685 <td>
2686 <ul>
2687 <li>All
2688 </ul>
2689 <td>
2690 <table>
2691 <tr><th>
2692 <tr><td>QASYMMU8
2693 <tr><td>QASYMMS8
2694 <tr><td>FLOAT16
2695 <tr><td>FLOAT32
2696 <tr><td>SIGNED32
2697 </table>
2698<tr>
2699 <td rowspan="3">ReshapeLayer
2700 <td rowspan="3" style="width:200px;"> Layer to reshape a tensor.
2701 <td rowspan="3">
2702 <ul>
2703 <li>ANEURALNETWORKS_RESHAPE
2704 <li>ANEURALNETWORKS_SQUEEZE
2705 <li>ANEURALNETWORKS_EXPAND_DIMS
2706 </ul>
2707 <td>CpuRef
2708 <td>
2709 <ul>
2710 <li>All
2711 </ul>
2712 <td>
2713 <table>
2714 <tr><th>
2715 <tr><td>BFLOAT16
2716 <tr><td>FLOAT16
2717 <tr><td>FLOAT32
2718 <tr><td>QASYMMS8
2719 <tr><td>QASYMMU8
2720 <tr><td>QSYMMS16
2721 <tr><td>SIGNED32
2722 <tr><td>BOOLEAN
2723 </table>
2724<tr>
2725 <td>CpuAcc
2726 <td>
2727 <ul>
2728 <li>All
2729 </ul>
2730 <td>
2731 <table>
2732 <tr><th>
2733 <tr><td>All
2734 </table>
2735<tr>
2736 <td>GpuAcc
2737 <td>
2738 <ul>
2739 <li>All
2740 </ul>
2741 <td>
2742 <table>
2743 <tr><th>
2744 <tr><td>All
2745 </table>
2746<tr>
2747 <td rowspan="3">ResizeLayer
2748 <td rowspan="3" style="width:200px;"> Layer to perform resize of a tensor using one of the interpolation methods: - Bilinear - Nearest Neighbor.
2749 <td rowspan="3">
2750 <ul>
2751 <li>ANEURALNETWORKS_RESIZE_BILINEAR
2752 <li>ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR
2753 </ul>
2754 <td>CpuRef
2755 <td>
2756 <ul>
2757 <li>All
2758 </ul>
2759 <td>
2760 <table>
2761 <tr><th>
2762 <tr><td>BFLOAT16
2763 <tr><td>FLOAT16
2764 <tr><td>FLOAT32
2765 <tr><td>QASYMMS8
2766 <tr><td>QASYMMU8
2767 <tr><td>QSYMMS16
2768 </table>
2769<tr>
2770 <td>CpuAcc
2771 <td>
2772 <ul>
2773 <li>NHWC
2774 <li>NCHW
2775 </ul>
2776 <td>
2777 <table>
2778 <tr><th>
2779 <tr><td>QASYMMU8
2780 <tr><td>QASYMMS8
2781 <tr><td>FLOAT16
2782 <tr><td>FLOAT32
2783 </table>
2784<tr>
Tracy Narine944fb502023-07-04 15:08:57 +01002785 <td>GpuAcc
2786 <td>
2787 <ul>
2788 <li>NHWC
2789 <li>NCHW
2790 </ul>
2791 <td>
2792 <table>
2793 <tr><th>
2794 <tr><td>QASYMMU8
2795 <tr><td>QASYMMS8
2796 <tr><td>FLOAT16
2797 <tr><td>FLOAT32
2798 </table>
2799<tr>
Tianle Cheng988354d2023-06-28 13:20:47 +01002800 <td rowspan="3">ReverseV2Layer
Tracy Narine944fb502023-07-04 15:08:57 +01002801 <td rowspan="3" style="width:200px;"> Layer to perform reverse of a tensor.
Tianle Cheng988354d2023-06-28 13:20:47 +01002802 <td rowspan="3">
2803 <ul>
Tracy Narine944fb502023-07-04 15:08:57 +01002804 <li>NA
Tianle Cheng988354d2023-06-28 13:20:47 +01002805 </ul>
2806 <td>CpuRef
2807 <td>
2808 <ul>
2809 <li>All
2810 </ul>
2811 <td>
2812 <table>
2813 <tr><th>
2814 <tr><td>BFLOAT16
2815 <tr><td>FLOAT16
2816 <tr><td>FLOAT32
2817 <tr><td>QASYMMS8
2818 <tr><td>QASYMMU8
2819 <tr><td>QSYMMS16
2820 </table>
2821<tr>
2822 <td>CpuAcc
2823 <td>
2824 <ul>
Tracy Narine944fb502023-07-04 15:08:57 +01002825 <li>All
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01002826 </ul>
2827 <td>
2828 <table>
2829 <tr><th>
Tracy Narine944fb502023-07-04 15:08:57 +01002830 <tr><td>All
2831 </table>
2832<tr>
2833 <td>GpuAcc
2834 <td>
2835 <ul>
2836 <li>All
2837 </ul>
2838 <td>
2839 <table>
2840 <tr><th>
2841 <tr><td>All
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01002842 </table>
2843<tr>
2844 <td rowspan="3">RsqrtLayer
2845 <td rowspan="3" style="width:200px;"> Layer to perform Rsqrt operation.
2846 <td rowspan="3">
2847 <ul>
2848 <li>ANEURALNETWORKS_RSQRT
2849 </ul>
2850 <td>CpuRef
2851 <td>
2852 <ul>
2853 <li>All
2854 </ul>
2855 <td>
2856 <table>
2857 <tr><th>
2858 <tr><td>BFLOAT16
2859 <tr><td>FLOAT16
2860 <tr><td>FLOAT32
2861 <tr><td>QASYMMS8
2862 <tr><td>QASYMMU8
2863 <tr><td>QSYMMS16
2864 <tr><td>SIGNED32
2865 </table>
2866<tr>
2867 <td>CpuAcc
2868 <td>
2869 <ul>
2870 <li>All
2871 </ul>
2872 <td>
2873 <table>
2874 <tr><th>
2875 <tr><td>FLOAT16
2876 <tr><td>FLOAT32
2877 <tr><td>SIGNED32
2878 </table>
2879<tr>
2880 <td>GpuAcc
2881 <td>
2882 <ul>
2883 <li>All
2884 </ul>
2885 <td>
2886 <table>
2887 <tr><th>
2888 <tr><td>FLOAT16
2889 <tr><td>FLOAT32
2890 </table>
2891<tr>
2892 <td rowspan="3">ShapeLayer
2893 <td rowspan="3" style="width:200px;"> Layer to return the shape of the input tensor.
2894 <td rowspan="3">
2895 <ul>
2896 <li>N/A
2897 </ul>
2898 <td>CpuRef
2899 <td>
2900 <ul>
2901 <li>All
2902 </ul>
2903 <td>
2904 <table>
2905 <tr><th>
2906 <tr><td>All
2907 </table>
2908<tr>
2909 <td>CpuAcc
2910 <td>
2911 <ul>
2912 <li>All
2913 </ul>
2914 <td>
2915 <table>
2916 <tr><th>
2917 <tr><td>All
2918 </table>
2919<tr>
2920 <td>GpuAcc
2921 <td>
2922 <ul>
2923 <li>All
2924 </ul>
2925 <td>
2926 <table>
2927 <tr><th>
2928 <tr><td>All
2929 </table>
2930<tr>
2931 <td rowspan="3">SliceLayer
2932 <td rowspan="3" style="width:200px;"> Layer to perform tensor slicing.
2933 <td rowspan="3">
2934 <ul>
2935 <li>ANEURALNETWORKS_SLICE
2936 </ul>
2937 <td>CpuRef
2938 <td>
2939 <ul>
2940 <li>All
2941 </ul>
2942 <td>
2943 <table>
2944 <tr><th>
2945 <tr><td>BFLOAT16
2946 <tr><td>FLOAT32
2947 <tr><td>QASYMMS8
2948 <tr><td>QASYMMU8
2949 <tr><td>QSYMMS16
2950 </table>
2951<tr>
2952 <td>CpuAcc
2953 <td>
2954 <ul>
2955 <li>All
2956 </ul>
2957 <td>
2958 <table>
2959 <tr><th>
2960 <tr><td>All
2961 </table>
2962<tr>
2963 <td>GpuAcc
2964 <td>
2965 <ul>
2966 <li>All
2967 </ul>
2968 <td>
2969 <table>
2970 <tr><th>
2971 <tr><td>All
2972 </table>
2973<tr>
2974 <td rowspan="3">SoftmaxLayer
2975 <td rowspan="3" style="width:200px;"> Layer to perform softmax, log-softmax operation over the specified axis.
2976 <td rowspan="3">
2977 <ul>
2978 <li>ANEURALNETWORKS_LOG_SOFTMAX
2979 <li>ANEURALNETWORKS_SOFTMAX
2980 </ul>
2981 <td>CpuRef
2982 <td>
2983 <ul>
2984 <li>All
2985 </ul>
2986 <td>
2987 <table>
2988 <tr><th>
2989 <tr><td>BFLOAT16
2990 <tr><td>FLOAT16
2991 <tr><td>FLOAT32
2992 <tr><td>QASYMMS8
2993 <tr><td>QASYMMU8
2994 <tr><td>QSYMMS8
2995 <tr><td>QSYMMS16
2996 </table>
2997<tr>
2998 <td>CpuAcc
2999 <td>
3000 <ul>
3001 <li>All
3002 </ul>
3003 <td>
3004 <table>
3005 <tr><th>
3006 <tr><td>QASYMMU8
3007 <tr><td>QASYMMS8
3008 <tr><td>FLOAT16
3009 <tr><td>FLOAT32
3010 </table>
3011<tr>
3012 <td>GpuAcc
3013 <td>
3014 <ul>
3015 <li>All
3016 </ul>
3017 <td>
3018 <table>
3019 <tr><th>
3020 <tr><td>QASYMMU8
3021 <tr><td>QASYMMS8
3022 <tr><td>FLOAT16
3023 <tr><td>FLOAT32
3024 </table>
3025<tr>
3026 <td rowspan="3">SpaceToBatchNdLayer
3027 <td rowspan="3" style="width:200px;"> Layer to divide spatial dimensions of the tensor into a grid of blocks and interleaves these blocks with the batch dimension.
3028 <td rowspan="3">
3029 <ul>
3030 <li>ANEURALNETWORKS_SPACE_TO_BATCH_ND
3031 </ul>
3032 <td>CpuRef
3033 <td>
3034 <ul>
3035 <li>All
3036 </ul>
3037 <td>
3038 <table>
3039 <tr><th>
3040 <tr><td>BFLOAT16
3041 <tr><td>FLOAT16
3042 <tr><td>FLOAT32
3043 <tr><td>QASYMMS8
3044 <tr><td>QASYMMU8
3045 <tr><td>QSYMMS16
3046 </table>
3047<tr>
3048 <td>CpuAcc
3049 <td>
3050 <ul>
3051 <li>NHWC
3052 <li>NCHW
3053 </ul>
3054 <td>
3055 <table>
3056 <tr><th>
3057 <tr><td>All
3058 </table>
3059<tr>
3060 <td>GpuAcc
3061 <td>
3062 <ul>
3063 <li>NHWC
3064 <li>NCHW
3065 </ul>
3066 <td>
3067 <table>
3068 <tr><th>
3069 <tr><td>All
3070 </table>
3071<tr>
3072 <td rowspan="3">SpaceToDepthLayer
3073 <td rowspan="3" style="width:200px;"> Layer to rearrange blocks of spatial data into depth.
3074 <td rowspan="3">
3075 <ul>
3076 <li>ANEURALNETWORKS_SPACE_TO_DEPTH
3077 </ul>
3078 <td>CpuRef
3079 <td>
3080 <ul>
3081 <li>All
3082 </ul>
3083 <td>
3084 <table>
3085 <tr><th>
3086 <tr><td>BFLOAT16
3087 <tr><td>FLOAT16
3088 <tr><td>FLOAT32
3089 <tr><td>QASYMMS8
3090 <tr><td>QASYMMU8
3091 <tr><td>QSYMMS16
3092 </table>
3093<tr>
3094 <td>CpuAcc
3095 <td>
3096 <ul>
3097 <li>NHWC
3098 <li>NCHW
3099 </ul>
3100 <td>
3101 <table>
3102 <tr><th>
3103 <tr><td>All
3104 </table>
3105<tr>
3106 <td>GpuAcc
3107 <td>
3108 <ul>
3109 <li>NHWC
3110 <li>NCHW
3111 </ul>
3112 <td>
3113 <table>
3114 <tr><th>
3115 <tr><td>All
3116 </table>
3117<tr>
3118 <td rowspan="3">SplitterLayer
3119 <td rowspan="3" style="width:200px;"> Layer to split a tensor along a given axis.
3120 <td rowspan="3">
3121 <ul>
3122 <li>ANEURALNETWORKS_SPLIT
3123 </ul>
3124 <td>CpuRef
3125 <td>
3126 <ul>
3127 <li>All
3128 </ul>
3129 <td>
3130 <table>
3131 <tr><th>
3132 <tr><td>BFLOAT16
3133 <tr><td>FLOAT16
3134 <tr><td>FLOAT32
3135 <tr><td>QASYMMS8
3136 <tr><td>QASYMMU8
3137 <tr><td>QSYMMS16
3138 </table>
3139<tr>
3140 <td>CpuAcc
3141 <td>
3142 <ul>
3143 <li>All
3144 </ul>
3145 <td>
3146 <table>
3147 <tr><th>
3148 <tr><td>All
3149 </table>
3150<tr>
3151 <td>GpuAcc
3152 <td>
3153 <ul>
3154 <li>All
3155 </ul>
3156 <td>
3157 <table>
3158 <tr><th>
3159 <tr><td>All
3160 </table>
3161<tr>
3162 <td rowspan="3">StackLayer
3163 <td rowspan="3" style="width:200px;"> Layer to stack tensors along an axis.
3164 <td rowspan="3">
3165 <ul>
3166 <li>N/A
3167 </ul>
3168 <td>CpuRef
3169 <td>
3170 <ul>
3171 <li>All
3172 </ul>
3173 <td>
3174 <table>
3175 <tr><th>
3176 <tr><td>BFLOAT16
3177 <tr><td>FLOAT16
3178 <tr><td>FLOAT32
3179 <tr><td>QASYMMS8
3180 <tr><td>QASYMMU8
3181 <tr><td>QSYMMS16
3182 </table>
3183<tr>
3184 <td>CpuAcc
3185 <td>
3186 <ul>
3187 <li>All
3188 </ul>
3189 <td>
3190 <table>
3191 <tr><th>
3192 <tr><td>All
3193 </table>
3194<tr>
3195 <td>GpuAcc
3196 <td>
3197 <ul>
3198 <li>All
3199 </ul>
3200 <td>
3201 <table>
3202 <tr><th>
3203 <tr><td>All
3204 </table>
3205<tr>
3206 <td rowspan="1">StandInLayer
3207 <td rowspan="1" style="width:200px;"> A layer to represent "unknown" or "unsupported" operations in the input graph. It has a configurable number of input and output slots and an optional name.
3208 <td rowspan="1">
3209 <ul>
3210 <li>N/A
3211 </ul>
3212 <td>N/A
3213 <td>N/A
3214 <td>N/A
3215<tr>
3216 <td rowspan="3">StridedSliceLayer
3217 <td rowspan="3" style="width:200px;"> Layer to extract a strided slice of a tensor.
3218 <td rowspan="3">
3219 <ul>
3220 <li>ANEURALNETWORKS_STRIDED_SLICE
3221 </ul>
3222 <td>CpuRef
3223 <td>
3224 <ul>
3225 <li>All
3226 </ul>
3227 <td>
3228 <table>
3229 <tr><th>
3230 <tr><td>BFLOAT16
3231 <tr><td>FLOAT32
3232 <tr><td>QASYMMS8
3233 <tr><td>QASYMMU8
3234 <tr><td>QSYMMS16
3235 </table>
3236<tr>
3237 <td>CpuAcc
3238 <td>
3239 <ul>
3240 <li>All
3241 </ul>
3242 <td>
3243 <table>
3244 <tr><th>
3245 <tr><td>All
3246 </table>
3247<tr>
3248 <td>GpuAcc
3249 <td>
3250 <ul>
3251 <li>All
3252 </ul>
3253 <td>
3254 <table>
3255 <tr><th>
3256 <tr><td>All
3257 </table>
3258<tr>
3259 <td rowspan="3">SubtractionLayer
3260     <td rowspan="3" style="width:200px;"> Layer to perform an elementwise subtraction of two tensors.
3261 <td rowspan="3">
3262 <ul>
3263 <li>ANEURALNETWORKS_SUB
3264 </ul>
3265 <td>CpuRef
3266 <td>
3267 <ul>
3268 <li>All
3269 </ul>
3270 <td>
3271 <table>
3272 <tr><th>
3273 <tr><td>BFLOAT16
3274 <tr><td>FLOAT16
3275 <tr><td>FLOAT32
3276 <tr><td>QASYMMS8
3277 <tr><td>QASYMMU8
3278 <tr><td>QSYMMS16
3279 <tr><td>SIGNED32
3280 </table>
3281<tr>
3282 <td>CpuAcc
3283 <td>
3284 <ul>
3285 <li>All
3286 </ul>
3287 <td>
3288 <table>
3289 <tr><th>
3290 <tr><td>QASYMMU8
3291 <tr><td>QASYMMS8
3292 <tr><td>QSYMMS16
3293 <tr><td>SIGNED32
3294 <tr><td>FLOAT16
3295 <tr><td>FLOAT32
3296 </table>
3297<tr>
3298 <td>GpuAcc
3299 <td>
3300 <ul>
3301 <li>All
3302 </ul>
3303 <td>
3304 <table>
3305 <tr><th>
3306 <tr><td>QASYMMU8
3307 <tr><td>QASYMMS8
3308 <tr><td>QSYMMS16
3309 <tr><td>SIGNED32
3310 <tr><td>FLOAT16
3311 <tr><td>FLOAT32
3312 </table>
3313<tr>
3314     <td rowspan="3">TileLayer
3315     <td rowspan="3" style="width:200px;"> Layer to construct a tensor by repeating a given tensor in tiles.
3316 <td rowspan="3">
3317 <ul>
3318 <li>ANEURALNETWORKS_TILE
3319 </ul>
3320 <td>CpuRef
3321 <td>
3322 <ul>
3323 <li>All
3324 </ul>
3325 <td>
3326 <table>
3327 <tr><th>
3328 <tr><td>FLOAT16
3329 <tr><td>FLOAT32
3330 <tr><td>QASYMMS8
3331 <tr><td>QASYMMU8
3332 <tr><td>QSYMMS16
3333 <tr><td>SIGNED32
3334 </table>
3335<tr>
3336 <td>CpuAcc
3337 <td>
3338 <ul>
3339        <li>All
3340       </ul>
3341 <td>
3342       <table>
3343 <tr><th>
3344 <tr><td>FLOAT16
3345 <tr><td>FLOAT32
3346 <tr><td>QASYMMS8
3347 <tr><td>QASYMMU8
3349 <tr><td>QSYMMS16
3350 <tr><td>SIGNED32
3351 </table>
3352<tr>
3353 <td>GpuAcc
3354 <td>
3355 <ul>
3356 <li>None
3357 </ul>
3358 <td>
3359 <table>
3360 <tr><th>
3361 <tr><td>None
3362 </table>
3363<tr>
3364     <td rowspan="3">TransposeConvolution2dLayer
3365     <td rowspan="3" style="width:200px;"> Layer to perform a 2D transpose convolution (deconvolution) operation.
3366 <td rowspan="3">
3367 <ul>
3368 <li>ANEURALNETWORKS_TRANSPOSE_CONV_2D
3369 </ul>
3370 <td>CpuRef
3371 <td>
3372 <ul>
3373 <li>All
3374 </ul>
3375 <td>
3376 <table>
3377 <tr><th>
3378 <tr><td>BFLOAT16
3379 <tr><td>FLOAT16
3380 <tr><td>FLOAT32
3381 <tr><td>QASYMMS8
3382 <tr><td>QASYMMU8
3383 <tr><td>QSYMMS8
3384 <tr><td>QSYMMS16
3385 </table>
3386<tr>
3387 <td>CpuAcc
3388 <td>
3389 <ul>
3390 <li>NHWC
3391 <li>NCHW
3392 </ul>
3393 <td>
3394 <table>
3395 <tr><th>
3396 <tr><td>SIGNED32
3397 <tr><td>FLOAT16
3398 <tr><td>FLOAT32
3399 <tr><td>QASYMMU8
3400 <tr><td>QASYMMS8
3401 <tr><td>QUANTIZEDSYMM8PERAXIS
3402 </table>
3403<tr>
3404 <td>GpuAcc
3405 <td>
3406 <ul>
3407 <li>NHWC
3408 <li>NCHW
3409 </ul>
3410 <td>
3411 <table>
3412 <tr><th>
3413 <tr><td>SIGNED32
3414 <tr><td>FLOAT16
3415 <tr><td>FLOAT32
3416 <tr><td>QASYMMU8
3417 <tr><td>QASYMMS8
3418 <tr><td>QUANTIZEDSYMM8PERAXIS
3419 </table>
3420<tr>
3421 <td rowspan="3">TransposeLayer
3422 <td rowspan="3" style="width:200px;"> Layer to transpose a tensor.
3423 <td rowspan="3">
3424 <ul>
3425 <li>ANEURALNETWORKS_TRANSPOSE
3426 </ul>
3427 <td>CpuRef
3428 <td>
3429 <ul>
3430 <li>All
3431 </ul>
3432 <td>
3433 <table>
3434 <tr><th>
3435 <tr><td>BFLOAT16
3436 <tr><td>FLOAT16
3437 <tr><td>FLOAT32
3438 <tr><td>QASYMMS8
3439 <tr><td>QASYMMU8
3440 <tr><td>QSYMMS16
3441 </table>
3442<tr>
3443 <td>CpuAcc
3444 <td>
3445 <ul>
3446 <li>All
3447 </ul>
3448 <td>
3449 <table>
3450 <tr><th>
3451 <tr><td>All
3452 </table>
3453<tr>
3454 <td>GpuAcc
3455 <td>
3456 <ul>
3457 <li>All
3458 </ul>
3459 <td>
3460 <table>
3461 <tr><th>
3462 <tr><td>All
3463 </table>
3464<tr>
3465     <td rowspan="3">UnidirectionalSequenceLstmLayer
3466     <td rowspan="3" style="width:200px;"> Layer to perform a unidirectional sequence LSTM operation.
3467     <td rowspan="3">
3468 <ul>
3469 <li>ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM
3470 </ul>
3471 <td>CpuRef
3472 <td>
3473 <ul>
3474 <li>All
3475 </ul>
3476 <td>
3477 <table>
3478      <tr><th>Input Types
3479 <tr><td>FLOAT32
3480     </table>
3481      <table>
3482 <tr><th>Weight Types
3483 <tr><td>FLOAT32
3484 <tr><td>QASYMMS8
3485 </table>
3486<tr>
3487     <td>CpuAcc
3488 <td>
3489 <ul>
3490 <li>All
3491 </ul>
3492 <td>
3493 <table>
3494 <tr><th>Input Types
3495 <tr><td>FLOAT32
3496 </table>
3497 <table>
3498 <tr><th>Weight Types
3499 <tr><td>FLOAT32
3500 </table>
3501<tr>
3502     <td>GpuAcc
3503 <td>
3504 <ul>
3505 <li>All
3506 </ul>
3507 <td>
3508 <table>
3509 <tr><th>Input Types
3510 <tr><td>FLOAT32
3511 </table>
3512 <table>
3513 <tr><th>Weight Types
3514 <tr><td>FLOAT32
3515 </table>
3516<tr>
3517 <td rowspan="3">UnmapLayer
3518     <td rowspan="3" style="width:200px;"> Layer to perform an unmap operation on a tensor.
3519 <td rowspan="3">
3520 <ul>
3521 <li>N/A
3522 </ul>
3523 <td>CpuRef
3524 <td>
3525 <ul>
3526 <li>All
3527 </ul>
3528 <td>
3529 <table>
3530 <tr><th>
3531 <tr><td>All
3532 </table>
3533<tr>
3534 <td>CpuAcc
3535 <td>
3536 <ul>
3537 <li>NHWC
3538 <li>NCHW
3539 </ul>
3540 <td>
3541 <table>
3542 <tr><th>
3543 <tr><td>All
3544 </table>
3545<tr>
3546 <td>GpuAcc
3547 <td>
3548 <ul>
3549 <li>NHWC
3550 <li>NCHW
3551 </ul>
3552 <td>
3553 <table>
3554 <tr><th>
3555 <tr><td>All
3556 </table>
3557</table>
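The support matrix above can also be queried programmatically before a network is built. The sketch below is a minimal, illustrative example, not a definitive recipe: it assumes the public armnn::GetILayerSupportByBackendId helper from armnn/BackendHelper.hpp, and the tensor shape, beta and axis values are placeholders chosen for illustration. It asks one backend whether it can execute a particular Softmax configuration, mirroring one cell of the table:

@code
#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <string>

bool IsSoftmaxSupportedOnCpuAcc()
{
    // Illustrative FLOAT32 tensors; see the SoftmaxLayer row above for the
    // data types each backend accepts.
    armnn::TensorInfo inputInfo(armnn::TensorShape({1, 10}), armnn::DataType::Float32);
    armnn::TensorInfo outputInfo(armnn::TensorShape({1, 10}), armnn::DataType::Float32);

    armnn::SoftmaxDescriptor descriptor;
    descriptor.m_Beta = 1.0f; // plain softmax
    descriptor.m_Axis = -1;   // reduce over the last dimension

    // Fetch the layer-support interface of the CpuAcc (Neon) backend.
    armnn::LayerSupportHandle handle =
        armnn::GetILayerSupportByBackendId(armnn::BackendId("CpuAcc"));

    std::string reason;
    return handle.IsSoftmaxSupported(inputInfo, outputInfo, descriptor,
                                     armnn::Optional<std::string&>(reason));
}
@endcode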
3558
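Backend assignment itself happens when a network is optimized: armnn::Optimize visits each layer and places it on the first backend in the caller's preference list that reports the layer and data-type combination as supported, falling back to later entries otherwise. A minimal sketch, assuming a trivial input -> softmax -> output graph with illustrative FLOAT32 shapes:

@code
#include <armnn/ArmNN.hpp>

#include <utility>
#include <vector>

int main()
{
    using namespace armnn;

    // Build the graph: input -> softmax -> output.
    INetworkPtr network = INetwork::Create();
    IConnectableLayer* input   = network->AddInputLayer(0);
    SoftmaxDescriptor softmaxDesc;
    IConnectableLayer* softmax = network->AddSoftmaxLayer(softmaxDesc, "softmax");
    IConnectableLayer* output  = network->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    TensorInfo info(TensorShape({1, 10}), DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(info);
    softmax->GetOutputSlot(0).SetTensorInfo(info);

    // Prefer CpuAcc; any layer it cannot run falls back to CpuRef.
    std::vector<BackendId> backendPreferences = { "CpuAcc", "CpuRef" };

    IRuntime::CreationOptions options;
    IRuntimePtr runtime = IRuntime::Create(options);
    IOptimizedNetworkPtr optNet = Optimize(*network,
                                           backendPreferences,
                                           runtime->GetDeviceSpec());

    NetworkId networkId = 0;
    runtime->LoadNetwork(networkId, std::move(optNet));
    return 0;
}
@endcode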
3559*/
3560} // namespace