/// Copyright (c) 2021, 2023-2024 ARM Limited and Contributors. All rights reserved.
///
/// SPDX-License-Identifier: MIT
///

namespace armnn
{
/**
@page operator_list Arm NN Operators


@section S5_1_operator_list Arm NN Operators

Arm NN supports the operators listed in the table below.

Arm NN supports a wide range of data types.
The main data types supported by the Machine Learning functions are the following:
    <ul>
        <li><b>BFLOAT16:</b> 16-bit non-standard brain floating point
        <li><b>QASYMMU8:</b> 8-bit unsigned asymmetric quantized
        <li><b>QASYMMS8:</b> 8-bit signed asymmetric quantized
        <li><b>QUANTIZEDSYMM8PERAXIS:</b> 8-bit signed symmetric quantized, per axis
        <li><b>QSYMMS8:</b> 8-bit signed symmetric quantized
        <li><b>QSYMMS16:</b> 16-bit signed symmetric quantized
        <li><b>FLOAT32:</b> 32-bit single precision floating point
        <li><b>FLOAT16:</b> 16-bit half precision floating point
        <li><b>SIGNED32:</b> 32-bit signed integer
        <li><b>BOOLEAN:</b> 8-bit unsigned char
        <li><b>All:</b> Agnostic to any specific data type
    </ul>

Arm NN supports the following data layouts (fastest changing dimension from right to left):
    <ul>
        <li><b>NHWC:</b> Layout where channels are in the fastest changing dimension
        <li><b>NCHW:</b> Layout where width is in the fastest changing dimension
        <li><b>All:</b> Agnostic to any specific data layout
    </ul>
where N = batches, C = channels, H = height, W = width
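
As a concrete illustration of how these data types and layouts appear in the API: a tensor's
type and shape travel together in an armnn::TensorInfo, while layout-sensitive layers take
their layout from the layer descriptor. The sketch below is illustrative only (it assumes the
standard armnn::TensorInfo and armnn::Pooling2dDescriptor APIs; the shape and function name
are arbitrary):

@code{.cpp}
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

void OperatorListExample()
{
    // A 1x16x16x3 FLOAT32 tensor; with NHWC the channel dimension (3)
    // is the fastest changing one.
    armnn::TensorInfo inputInfo(armnn::TensorShape({1, 16, 16, 3}),
                                armnn::DataType::Float32);

    // Layout-sensitive layers expose the layout on their descriptor,
    // e.g. a max pooling layer operating on NHWC data.
    armnn::Pooling2dDescriptor poolDesc;
    poolDesc.m_PoolType   = armnn::PoolingAlgorithm::Max;
    poolDesc.m_DataLayout = armnn::DataLayout::NHWC;
}
@endcode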

<table>
<caption id="multi_row"></caption>
<tr>
    <th>Operator
    <th>Description
    <th>Equivalent Android NNAPI Operator
    <th>Backends
    <th>Data Layouts
    <th>Data Types
<tr>
    <td rowspan="3">AbsLayer
    <td rowspan="3"> Layer to perform an absolute operation.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_ABS
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ActivationLayer
    <td rowspan="3" style="width:200px;"> Layer to apply the specified activation function.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_ABS
            <li>ANEURALNETWORKS_ELU
            <li>ANEURALNETWORKS_HARD_SWISH
            <li>ANEURALNETWORKS_LOGISTIC
            <li>ANEURALNETWORKS_PRELU
            <li>ANEURALNETWORKS_RELU
            <li>ANEURALNETWORKS_RELU1
            <li>ANEURALNETWORKS_RELU6
            <li>ANEURALNETWORKS_SQRT
            <li>ANEURALNETWORKS_TANH
            <li>GELU
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">AdditionLayer
    <td rowspan="3" style="width:200px;"> Layer to add two tensors.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_ADD
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ArgMinMaxLayer
    <td rowspan="3" style="width:200px;"> Layer to calculate the index of the minimum or maximum values in a tensor based on an axis.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_ARGMAX
            <li>ANEURALNETWORKS_ARGMIN
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>SIGNED64
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">BatchMatMulLayer
    <td rowspan="3" style="width:200px;"> Layer to perform batch matrix multiplication.
    <td rowspan="3">
        <ul>
            <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
        </table>
<tr>
    <td rowspan="3">BatchNormalizationLayer
    <td rowspan="3" style="width:200px;"> Layer to perform batch normalization.
    <td rowspan="3">
        <ul>
            <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT32
            <tr><td>FLOAT16
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT32
            <tr><td>FLOAT16
        </table>
<tr>
    <td rowspan="3">BatchToSpaceNdLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a batch-to-space transformation.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_BATCH_TO_SPACE_ND
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">BroadcastToLayer
    <td rowspan="3" style="width:200px;"> Layer to broadcast a tensor to a given size.
    <td rowspan="3">
        <ul>
            <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>N/A
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>N/A
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>N/A
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>N/A
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>N/A
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>N/A
        </table>
<tr>
    <td rowspan="3">CastLayer
    <td rowspan="3" style="width:200px;"> Layer to cast a tensor to a given type.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_CAST
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QSYMMS8
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>SIGNED64
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT32
            <tr><td>FLOAT16
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>SIGNED32
            <tr><td>SIGNED64
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT32
            <tr><td>FLOAT16
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>SIGNED32
            <tr><td>SIGNED64
        </table>
<tr>
    <td rowspan="3">ChannelShuffleLayer
    <td rowspan="3" style="width:200px;"> Layer to reorganize the channels of a tensor.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_CHANNEL_SHUFFLE
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QSYMMS8
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ComparisonLayer
    <td rowspan="3" style="width:200px;"> Layer to compare two tensors.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_EQUAL
            <li>ANEURALNETWORKS_GREATER
            <li>ANEURALNETWORKS_GREATER_EQUAL
            <li>ANEURALNETWORKS_LESS
            <li>ANEURALNETWORKS_LESS_EQUAL
            <li>ANEURALNETWORKS_NOT_EQUAL
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>BOOLEAN
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">ConcatLayer
    <td rowspan="3" style="width:200px;"> Layer to concatenate tensors along a given axis.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_CONCATENATION
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ConstantLayer
    <td rowspan="3" style="width:200px;"> Layer to provide a constant tensor.
    <td rowspan="3">
        <ul>
            <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">ConvertFp16ToFp32Layer
    <td rowspan="3" style="width:200px;"> Layer to convert a Float16 tensor to a Float32 tensor.
    <td rowspan="3">
        <ul>
            <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ConvertFp32ToFp16Layer
    <td rowspan="3" style="width:200px;"> Layer to convert a Float32 tensor to a Float16 tensor.
    <td rowspan="3">
        <ul>
            <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">Convolution2dLayer
    <td rowspan="3" style="width:200px;"> Layer to compute a convolution operation.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_CONV_2D
            <li>ANEURALNETWORKS_GROUPED_CONV_2D
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QUANTIZEDSYMM8PERAXIS
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QUANTIZEDSYMM8PERAXIS
        </table>
<tr>
    <td rowspan="3">Convolution3dLayer
    <td rowspan="3" style="width:200px;"> Layer to compute a 3D convolution operation.
    <td rowspan="3">
        <ul>
            <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>NDHWC
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>N/A
        </ul>
    <td>
        <ul>
            <li>N/A
        </ul>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>N/A
        </ul>
    <td>
        <ul>
            <li>N/A
        </ul>
<tr>
    <td rowspan="1">DebugLayer
    <td rowspan="1" style="width:200px;"> Layer to print out inter-layer tensor information.
    <td rowspan="1">
        <ul>
            <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td rowspan="3">DepthToSpaceLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a depth-to-space transformation.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_DEPTH_TO_SPACE
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">DepthwiseConvolution2dLayer
    <td rowspan="3" style="width:200px;"> Layer to compute a depthwise convolution operation.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_DEPTHWISE_CONV_2D
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QUANTIZEDSYMM8PERAXIS
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QUANTIZEDSYMM8PERAXIS
        </table>
<tr>
    <td rowspan="3">DequantizeLayer
    <td rowspan="3" style="width:200px;"> Layer to dequantize the values in a tensor.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_DEQUANTIZE
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QUANTIZEDSYMM8PERAXIS
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QUANTIZEDSYMM8PERAXIS
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td rowspan="2">DetectionPostProcessLayer
    <td rowspan="2" style="width:200px;"> Layer to generate the detection output based on center-size encoded boxes, class predictions and anchors by applying non-maximum suppression (NMS).
    <td rowspan="2">
        <ul>
            <li>ANEURALNETWORKS_DETECTION_POSTPROCESSING
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">DivisionLayer
    <td rowspan="3" style="width:200px;"> Layer to divide two tensors.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_DIV
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ElementwiseBaseLayer
    <td rowspan="3" style="width:200px;"> Layer to perform Add - Div - Max - Min - Mul operations.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_ADD
            <li>ANEURALNETWORKS_DIV
            <li>ANEURALNETWORKS_MAXIMUM
            <li>ANEURALNETWORKS_MINIMUM
            <li>ANEURALNETWORKS_MUL
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ElementwiseBinaryLayer
    <td rowspan="3" style="width:200px;"> Layer to perform Power and Square Difference operations.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_POW
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ElementwiseUnaryLayer
    <td rowspan="3" style="width:200px;"> Layer to perform Rsqrt - Exp - Neg - Log - Abs - Sin - Sqrt - Ceil operations.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_ABS
            <li>ANEURALNETWORKS_EXP
            <li>ANEURALNETWORKS_LOG
            <li>ANEURALNETWORKS_NEG
            <li>ANEURALNETWORKS_RSQRT
            <li>ANEURALNETWORKS_SIN
            <li>ANEURALNETWORKS_SQRT
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="1">FakeQuantizationLayer
    <td rowspan="1" style="width:200px;"> Layer to quantize float values and dequantize afterwards. The current implementation does not dequantize the values.
    <td rowspan="1">
        <ul>
            <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">FillLayer
    <td rowspan="3" style="width:200px;"> Layer to set the values of a tensor with a given value.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_FILL
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">FloorLayer
    <td rowspan="3" style="width:200px;"> Layer to round each value down to the nearest whole number.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_FLOOR
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT32
            <tr><td>FLOAT16
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT32
            <tr><td>FLOAT16
        </table>
<tr>
    <td rowspan="3">FullyConnectedLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a fully connected / dense operation.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_FULLY_CONNECTED
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
        </table>
<tr>
    <td rowspan="3">GatherLayer
    <td rowspan="3" style="width:200px;"> Layer to perform the gather operation along the chosen axis.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_GATHER
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">GatherNdLayer
    <td rowspan="3" style="width:200px;"> Layer to perform the gatherNd operation.
    <td rowspan="3">
        <ul>
            <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td rowspan="1">InputLayer
    <td rowspan="1" style="width:200px;"> Special layer used to provide input data to the computational network.
    <td rowspan="1">
        <ul>
            <li>N/A
        </ul>
    <td>All
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">InstanceNormalizationLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an instance normalization on a given axis.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_INSTANCE_NORMALIZATION
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">L2NormalizationLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an L2 normalization on a given axis.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_L2_NORMALIZATION
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">LogSoftmaxLayer
    <td rowspan="3" style="width:200px;"> Layer to perform the log softmax activation given logits.
    <td rowspan="3">
        <ul>
            <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">LogicalBinaryLayer
    <td rowspan="3" style="width:200px;"> Layer to perform Logical AND - Logical NOT - Logical OR operations.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_LOGICAL_AND
            <li>ANEURALNETWORKS_LOGICAL_NOT
            <li>ANEURALNETWORKS_LOGICAL_OR
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BOOLEAN
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BOOLEAN
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BOOLEAN
        </table>
<tr>
    <td rowspan="3">LstmLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a single time step in a Long Short-Term Memory (LSTM) operation.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_LSTM
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">MapLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a map operation on a tensor.
    <td rowspan="3">
        <ul>
            <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">MaximumLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an elementwise maximum of two tensors.
    <td rowspan="3">
        <ul>
            <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
        </table>
<tr>
    <td rowspan="3">MeanLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a reduce mean operation.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_MEAN
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">MemCopyLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a memory copy operation.
    <td rowspan="3">
        <ul>
            <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>BOOLEAN
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">MemImportLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a memory import operation.
    <td rowspan="3">
        <ul>
            <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">MergeLayer
    <td rowspan="3" style="width:200px;"> Layer to concatenate tensors along a given axis.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_CONCATENATION
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">MinimumLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an elementwise minimum of two tensors.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_MINIMUM
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
        </table>
<tr>
    <td rowspan="3">MultiplicationLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an elementwise multiplication of two tensors.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_MUL
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">NormalizationLayer
    <td rowspan="3" style="width:200px;"> Layer to compute a normalization operation.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT32
            <tr><td>FLOAT16
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT32
            <tr><td>FLOAT16
        </table>
<tr>
    <td rowspan="1">OutputLayer
    <td rowspan="1" style="width:200px;"> A special layer providing access to a user-supplied buffer into which the output of a network can be written.
    <td rowspan="1">
        <ul>
            <li>N/A
        </ul>
    <td>All
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">PadLayer
    <td rowspan="3" style="width:200px;"> Layer to pad a tensor.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_PAD
            <li>ANEURALNETWORKS_PAD_V2
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">PermuteLayer
    <td rowspan="3" style="width:200px;"> Layer to transpose an ND tensor.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_TRANSPOSE
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">Pooling2dLayer
    <td rowspan="3" style="width:200px;"> Layer to perform 2D pooling with the specified pooling operation.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_AVERAGE_POOL_2D
            <li>ANEURALNETWORKS_L2_POOL_2D
            <li>ANEURALNETWORKS_MAX_POOL_2D
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">Pooling3dLayer
    <td rowspan="3" style="width:200px;"> Layer to perform 3D pooling with the specified pooling operation.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_AVERAGE_POOL_3D
            <li>ANEURALNETWORKS_L2_POOL_3D
            <li>ANEURALNETWORKS_MAX_POOL_3D
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>NDHWC
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>N/A
        </ul>
    <td>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>NDHWC
        </ul>
    <td>
<tr>
    <td rowspan="1">PreCompiledLayer
    <td rowspan="1" style="width:200px;"> Opaque layer provided by a backend, containing an executable representation of a subgraph from the original network.
    <td rowspan="1">
        <ul>
            <li>N/A
        </ul>
    <td>N/A
    <td>N/A
    <td>N/A
<tr>
    <td rowspan="3">PreluLayer
    <td rowspan="3" style="width:200px;"> Layer to apply the PReLU activation function.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_PRELU
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">QLstmLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a quantized LSTM (Long Short-Term Memory) operation.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_QUANTIZED_LSTM
            <li>ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>SIGNED32
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>SIGNED32
            <tr><td>QSYMMS16
        </table>
<tr>
    <td rowspan="3">QuantizeLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a quantization operation.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_QUANTIZE
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QASYMM16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>QASYMM16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">QuantizedLstmLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a quantized LSTM (Long Short-Term Memory) operation.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_QUANTIZED_LSTM
            <li>ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>SIGNED32
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>SIGNED32
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td rowspan="3">RankLayer
    <td rowspan="3" style="width:200px;"> Layer to perform a rank operation.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_RANK
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">ReduceLayer
    <td rowspan="3" style="width:200px;"> Layer to perform reduce with the following operations - ARG_IDX_MAX: Index of the max value - ARG_IDX_MIN: Index of the min value - MEAN_SUM: Mean of sum - PROD: Product - SUM_SQUARE: Sum of squares - SUM: Sum - MIN: Min - MAX: Max
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_REDUCE_MAX
            <li>ANEURALNETWORKS_REDUCE_MIN
            <li>ANEURALNETWORKS_REDUCE_SUM
            <li>ANEURALNETWORKS_REDUCE_PROD
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
        </table>
<tr>
    <td rowspan="3">ReshapeLayer
    <td rowspan="3" style="width:200px;"> Layer to reshape a tensor.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_RESHAPE
            <li>ANEURALNETWORKS_SQUEEZE
            <li>ANEURALNETWORKS_EXPAND_DIMS
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
            <tr><td>BOOLEAN
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">ResizeLayer
    <td rowspan="3" style="width:200px;"> Layer to resize a tensor using one of the interpolation methods: Bilinear or Nearest Neighbor.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_RESIZE_BILINEAR
            <li>ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>NHWC
            <li>NCHW
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>QASYMMU8
            <tr><td>QASYMMS8
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ReverseV2Layer
    <td rowspan="3" style="width:200px;"> Layer to reverse a tensor.
    <td rowspan="3">
        <ul>
            <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">RsqrtLayer
    <td rowspan="3" style="width:200px;"> Layer to perform an Rsqrt operation.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_RSQRT
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>SIGNED32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
        </table>
<tr>
    <td rowspan="3">ScatterLayer
    <td rowspan="3" style="width:200px;"> Layer to scatter updates according to individual values at the specified indices.
    <td rowspan="3">
        <ul>
            <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>FLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS8
            <tr><td>QSYMMS16
            <tr><td>SIGNED32
        </table>
<tr>
    <td rowspan="3">ShapeLayer
    <td rowspan="3" style="width:200px;"> Layer to return the shape of the input tensor.
    <td rowspan="3">
        <ul>
            <li>N/A
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td rowspan="3">SliceLayer
    <td rowspan="3" style="width:200px;"> Layer to perform tensor slicing.
    <td rowspan="3">
        <ul>
            <li>ANEURALNETWORKS_SLICE
        </ul>
    <td>CpuRef
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>BFLOAT16
            <tr><td>FLOAT32
            <tr><td>QASYMMS8
            <tr><td>QASYMMU8
            <tr><td>QSYMMS16
        </table>
<tr>
    <td>CpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
<tr>
    <td>GpuAcc
    <td>
        <ul>
            <li>All
        </ul>
    <td>
        <table>
            <tr><th>
            <tr><td>All
        </table>
3072<tr>
 <td rowspan="3">SoftmaxLayer
 <td rowspan="3" style="width:200px;"> Layer to perform a softmax or log-softmax operation over the specified axis.
 <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_LOG_SOFTMAX
       <li>ANEURALNETWORKS_SOFTMAX
      </ul>
 <td>CpuRef
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>BFLOAT16
    <tr><td>FLOAT16
    <tr><td>FLOAT32
    <tr><td>QASYMMS8
    <tr><td>QASYMMU8
    <tr><td>QSYMMS8
    <tr><td>QSYMMS16
    </table>
<tr>
 <td>CpuAcc
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>QASYMMU8
    <tr><td>QASYMMS8
    <tr><td>FLOAT16
    <tr><td>FLOAT32
    </table>
<tr>
 <td>GpuAcc
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>QASYMMU8
    <tr><td>QASYMMS8
    <tr><td>FLOAT16
    <tr><td>FLOAT32
    </table>
<tr>
 <td rowspan="3">SpaceToBatchNdLayer
 <td rowspan="3" style="width:200px;"> Layer to divide spatial dimensions of the tensor into a grid of blocks and interleave these blocks with the batch dimension.
 <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_SPACE_TO_BATCH_ND
      </ul>
 <td>CpuRef
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>BFLOAT16
    <tr><td>FLOAT16
    <tr><td>FLOAT32
    <tr><td>QASYMMS8
    <tr><td>QASYMMU8
    <tr><td>QSYMMS16
    </table>
<tr>
 <td>CpuAcc
 <td>
      <ul>
       <li>NHWC
       <li>NCHW
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>All
    </table>
<tr>
 <td>GpuAcc
 <td>
      <ul>
       <li>NHWC
       <li>NCHW
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>All
    </table>
<tr>
 <td rowspan="3">SpaceToDepthLayer
 <td rowspan="3" style="width:200px;"> Layer to rearrange blocks of spatial data into depth.
 <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_SPACE_TO_DEPTH
      </ul>
 <td>CpuRef
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>BFLOAT16
    <tr><td>FLOAT16
    <tr><td>FLOAT32
    <tr><td>QASYMMS8
    <tr><td>QASYMMU8
    <tr><td>QSYMMS16
    </table>
<tr>
 <td>CpuAcc
 <td>
      <ul>
       <li>NHWC
       <li>NCHW
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>All
    </table>
<tr>
 <td>GpuAcc
 <td>
      <ul>
       <li>NHWC
       <li>NCHW
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>All
    </table>
<tr>
 <td rowspan="3">SplitterLayer
 <td rowspan="3" style="width:200px;"> Layer to split a tensor along a given axis.
 <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_SPLIT
      </ul>
 <td>CpuRef
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>BFLOAT16
    <tr><td>FLOAT16
    <tr><td>FLOAT32
    <tr><td>QASYMMS8
    <tr><td>QASYMMU8
    <tr><td>QSYMMS16
    </table>
<tr>
 <td>CpuAcc
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>All
    </table>
<tr>
 <td>GpuAcc
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>All
    </table>
<tr>
 <td rowspan="3">StackLayer
 <td rowspan="3" style="width:200px;"> Layer to stack tensors along an axis.
 <td rowspan="3">
      <ul>
       <li>N/A
      </ul>
 <td>CpuRef
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>BFLOAT16
    <tr><td>FLOAT16
    <tr><td>FLOAT32
    <tr><td>QASYMMS8
    <tr><td>QASYMMU8
    <tr><td>QSYMMS16
    </table>
<tr>
 <td>CpuAcc
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>All
    </table>
<tr>
 <td>GpuAcc
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>All
    </table>
<tr>
 <td rowspan="1">StandInLayer
 <td rowspan="1" style="width:200px;"> A layer to represent "unknown" or "unsupported" operations in the input graph. It has a configurable number of input and output slots and an optional name.
 <td rowspan="1">
      <ul>
       <li>N/A
      </ul>
 <td>N/A
 <td>N/A
 <td>N/A
<tr>
 <td rowspan="3">StridedSliceLayer
 <td rowspan="3" style="width:200px;"> Layer to extract a strided slice of a tensor.
 <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_STRIDED_SLICE
      </ul>
 <td>CpuRef
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>BFLOAT16
    <tr><td>FLOAT32
    <tr><td>QASYMMS8
    <tr><td>QASYMMU8
    <tr><td>QSYMMS16
    </table>
<tr>
 <td>CpuAcc
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>All
    </table>
<tr>
 <td>GpuAcc
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>All
    </table>
<tr>
 <td rowspan="3">SubtractionLayer
 <td rowspan="3" style="width:200px;"> Layer to perform an elementwise subtraction of two tensors.
 <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_SUB
      </ul>
 <td>CpuRef
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>BFLOAT16
    <tr><td>FLOAT16
    <tr><td>FLOAT32
    <tr><td>QASYMMS8
    <tr><td>QASYMMU8
    <tr><td>QSYMMS16
    <tr><td>SIGNED32
    </table>
<tr>
 <td>CpuAcc
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>QASYMMU8
    <tr><td>QASYMMS8
    <tr><td>QSYMMS16
    <tr><td>SIGNED32
    <tr><td>FLOAT16
    <tr><td>FLOAT32
    </table>
<tr>
 <td>GpuAcc
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>QASYMMU8
    <tr><td>QASYMMS8
    <tr><td>QSYMMS16
    <tr><td>SIGNED32
    <tr><td>FLOAT16
    <tr><td>FLOAT32
    </table>
<tr>
 <td rowspan="3">TileLayer
 <td rowspan="3" style="width:200px;"> Layer to construct a tensor by tiling a given tensor.
 <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_TILE
      </ul>
 <td>CpuRef
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>FLOAT16
    <tr><td>FLOAT32
    <tr><td>QASYMMS8
    <tr><td>QASYMMU8
    <tr><td>QSYMMS8
    <tr><td>QSYMMS16
    <tr><td>SIGNED32
    </table>
<tr>
 <td>CpuAcc
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>FLOAT16
    <tr><td>FLOAT32
    <tr><td>QASYMMS8
    <tr><td>QASYMMU8
    <tr><td>QSYMMS8
    <tr><td>QSYMMS16
    <tr><td>SIGNED32
    </table>
<tr>
 <td>GpuAcc
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>FLOAT16
    <tr><td>FLOAT32
    <tr><td>QASYMMS8
    <tr><td>QASYMMU8
    <tr><td>QSYMMS8
    <tr><td>QSYMMS16
    <tr><td>SIGNED32
    </table>
<tr>
 <td rowspan="3">TransposeConvolution2dLayer
 <td rowspan="3" style="width:200px;"> Layer to perform a 2D transpose convolution (deconvolution) operation.
 <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_TRANSPOSE_CONV_2D
      </ul>
 <td>CpuRef
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>BFLOAT16
    <tr><td>FLOAT16
    <tr><td>FLOAT32
    <tr><td>QASYMMS8
    <tr><td>QASYMMU8
    <tr><td>QSYMMS8
    <tr><td>QSYMMS16
    </table>
<tr>
 <td>CpuAcc
 <td>
      <ul>
       <li>NHWC
       <li>NCHW
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>SIGNED32
    <tr><td>FLOAT16
    <tr><td>FLOAT32
    <tr><td>QASYMMU8
    <tr><td>QASYMMS8
    <tr><td>QUANTIZEDSYMM8PERAXIS
    </table>
<tr>
 <td>GpuAcc
 <td>
      <ul>
       <li>NHWC
       <li>NCHW
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>SIGNED32
    <tr><td>FLOAT16
    <tr><td>FLOAT32
    <tr><td>QASYMMU8
    <tr><td>QASYMMS8
    <tr><td>QUANTIZEDSYMM8PERAXIS
    </table>
<tr>
 <td rowspan="3">TransposeLayer
 <td rowspan="3" style="width:200px;"> Layer to transpose a tensor.
 <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_TRANSPOSE
      </ul>
 <td>CpuRef
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>BFLOAT16
    <tr><td>FLOAT16
    <tr><td>FLOAT32
    <tr><td>QASYMMS8
    <tr><td>QASYMMU8
    <tr><td>QSYMMS16
    </table>
<tr>
 <td>CpuAcc
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>All
    </table>
<tr>
 <td>GpuAcc
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>All
    </table>
<tr>
 <td rowspan="3">UnidirectionalSequenceLstmLayer
 <td rowspan="3" style="width:200px;"> Layer to perform a unidirectional sequence LSTM operation.
 <td rowspan="3">
      <ul>
       <li>ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM
      </ul>
 <td>CpuRef
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>Input Types
    <tr><td>FLOAT32
    </table>
    <table>
    <tr><th>Weight Types
    <tr><td>FLOAT32
    <tr><td>QASYMMS8
    </table>
<tr>
 <td>CpuAcc
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>Input Types
    <tr><td>FLOAT32
    </table>
    <table>
    <tr><th>Weight Types
    <tr><td>FLOAT32
    </table>
<tr>
 <td>GpuAcc
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>Input Types
    <tr><td>FLOAT32
    </table>
    <table>
    <tr><th>Weight Types
    <tr><td>FLOAT32
    </table>
<tr>
 <td rowspan="3">UnmapLayer
 <td rowspan="3" style="width:200px;"> Layer to perform an unmap operation on a tensor.
 <td rowspan="3">
      <ul>
       <li>N/A
      </ul>
 <td>CpuRef
 <td>
      <ul>
       <li>All
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>All
    </table>
<tr>
 <td>CpuAcc
 <td>
      <ul>
       <li>NHWC
       <li>NCHW
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>All
    </table>
<tr>
 <td>GpuAcc
 <td>
      <ul>
       <li>NHWC
       <li>NCHW
      </ul>
 <td>
    <table>
    <tr><th>
    <tr><td>All
    </table>
</table>
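
The table above lists what each backend can accept; which backend a layer actually runs on is decided when the network is optimized. The following is a minimal, illustrative sketch (not part of the documentation build) of how a backend preference list interacts with the table: Optimize() assigns each layer to the first backend in the list that supports its configuration and falls back to the next one otherwise. The tensor shape, binding ids, and layer names below are arbitrary placeholders.

@code{.cpp}
#include <armnn/ArmNN.hpp>
#include <vector>

int main()
{
    using namespace armnn;

    // Build a trivial graph: Input -> Softmax -> Output.
    INetworkPtr network = INetwork::Create();
    IConnectableLayer* input   = network->AddInputLayer(0, "input");
    SoftmaxDescriptor softmaxDesc; // default axis
    IConnectableLayer* softmax = network->AddSoftmaxLayer(softmaxDesc, "softmax");
    IConnectableLayer* output  = network->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // FLOAT32 is listed for SoftmaxLayer on CpuRef, CpuAcc and GpuAcc above.
    TensorInfo info(TensorShape({1, 10}), DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(info);
    softmax->GetOutputSlot(0).SetTensorInfo(info);

    // Prefer GpuAcc, then CpuAcc, with CpuRef as the reference fallback.
    IRuntime::CreationOptions options;
    IRuntimePtr runtime = IRuntime::Create(options);
    std::vector<BackendId> backends = { Compute::GpuAcc, Compute::CpuAcc, Compute::CpuRef };
    IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec());

    // Optimize() fails (throws, or yields an empty pointer) if some layer is not
    // supported by any backend in the preference list.
    return optNet ? 0 : 1;
}
@endcode

For a direct per-layer query without building a network, armnn/BackendHelper.hpp provides GetILayerSupportByBackendId(), whose LayerSupportHandle exposes per-layer Is...Supported() predicates taking the same tensor infos and descriptors.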

*/
} // namespace armnn