blob: 16b0f0b14c5dd6a98fec65a1d0fbb5e4332a078c [file] [log] [blame]
Teresa Charlin1fe6c812022-11-01 15:59:50 +00001/// Copyright (c) 2021, 2023 ARM Limited and Contributors. All rights reserved.
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01002///
3/// SPDX-License-Identifier: MIT
4///
5
6namespace armnn
7{
8/**
9@page operator_list Arm NN Operators
10
11@tableofcontents
12
13@section S5_1_operator_list Arm NN Operators
14
15Arm NN supports the operators listed in the table below.
16
17Arm NN supports a wide range of data types.
18The main data-types that the Machine Learning functions support are the following:
19 <ul>
20 <li><b>BFLOAT16:</b> 16-bit non-standard brain floating point
21 <li><b>QASYMMU8:</b> 8-bit unsigned asymmetric quantized
22 <li><b>QASYMMS8:</b> 8-bit signed asymmetric quantized
23 <li><b>QUANTIZEDSYMM8PERAXIS:</b> 8-bit signed symmetric quantized
Cathal Corbettb85113e2022-02-22 11:51:43 +000024 <li><b>QSYMMS8:</b> 8-bit signed symmetric quantized
25 <li><b>QSYMMS16:</b> 16-bit signed symmetric quantized
Sadik Armagan1a9c9f62021-08-05 09:25:15 +010026 <li><b>FLOAT32:</b> 32-bit single precision floating point
27 <li><b>FLOAT16:</b> 16-bit half precision floating point
28 <li><b>SIGNED32:</b> 32-bit signed integer
29 <li><b>BOOLEAN:</b> 8-bit unsigned char
30 <li><b>All:</b> Agnostic to any specific data type
31 </ul>
32
33Arm NN supports the following data layouts (fast changing dimension from right to left):
34 <ul>
35 <li><b>NHWC:</b> Layout where channels are in the fastest changing dimension
36 <li><b>NCHW:</b> Layout where width is in the fastest changing dimension
37 <li><b>All:</b> Agnostic to any specific data layout
38 </ul>
39where N = batches, C = channels, H = height, W = width
40
41<table>
42<caption id="multi_row"></caption>
43<tr>
44 <th>Operator
45 <th>Description
46 <th>Equivalent Android NNAPI Operator
47 <th>Backends
48 <th>Data Layouts
49 <th>Data Types
50<tr>
51 <td rowspan="3">AbsLayer
52 <td rowspan="3"> Layer to perform absolute operation.
53 <td rowspan="3">
54 <ul>
55 <li>ANEURALNETWORKS_ABS
56 </ul>
57 <td>CpuRef
58 <td>
59 <ul>
60 <li>All
61 </ul>
62 <td>
63 <table>
64 <tr><th>
65 <tr><td>BFLOAT16
66 <tr><td>FLOAT16
67 <tr><td>FLOAT32
68 <tr><td>QASYMMS8
69 <tr><td>QASYMMU8
70 <tr><td>QSYMMS16
71 <tr><td>SIGNED32
72 </table>
73<tr>
74 <td>CpuAcc
75 <td>
76 <ul>
77 <li>All
78 </ul>
79 <td>
80 <table>
81 <tr><th>
82 <tr><td>FLOAT16
83 <tr><td>FLOAT32
84 <tr><td>SIGNED32
85 </table>
86<tr>
87 <td>GpuAcc
88 <td>
89 <ul>
90 <li>All
91 </ul>
92 <td>
93 <table>
94 <tr><th>
95 <tr><td>FLOAT16
96 <tr><td>FLOAT32
97 </table>
98<tr>
99 <td rowspan="3">ActivationLayer
100 <td rowspan="3" style="width:200px;"> Layer to apply a specified activation function to its input.
101 <td rowspan="3">
102 <ul>
103 <li>ANEURALNETWORKS_ABS
104 <li>ANEURALNETWORKS_ELU
105 <li>ANEURALNETWORKS_HARD_SWISH
106 <li>ANEURALNETWORKS_LOGISTIC
107 <li>ANEURALNETWORKS_PRELU
108 <li>ANEURALNETWORKS_RELU
109 <li>ANEURALNETWORKS_RELU1
110 <li>ANEURALNETWORKS_RELU6
111 <li>ANEURALNETWORKS_SQRT
112 <li>ANEURALNETWORKS_TANH
113 </ul>
114 <td>CpuRef
115 <td>
116 <ul>
117 <li>All
118 </ul>
119 <td>
120 <table>
121 <tr><th>
122 <tr><td>BFLOAT16
123 <tr><td>FLOAT16
124 <tr><td>FLOAT32
125 <tr><td>QASYMMS8
126 <tr><td>QASYMMU8
127 <tr><td>QSYMMS16
128 </table>
129<tr>
130 <td>CpuAcc
131 <td>
132 <ul>
133 <li>All
134 </ul>
135 <td>
136 <table>
137 <tr><th>
138 <tr><td>QASYMMU8
139 <tr><td>QASYMMS8
140 <tr><td>QSYMMS16
141 <tr><td>FLOAT16
142 <tr><td>FLOAT32
143 </table>
144<tr>
145 <td>GpuAcc
146 <td>
147 <ul>
148 <li>All
149 </ul>
150 <td>
151 <table>
152 <tr><th>
153 <tr><td>QASYMMU8
154 <tr><td>QASYMMS8
155 <tr><td>QSYMMS16
156 <tr><td>FLOAT16
157 <tr><td>FLOAT32
158 </table>
159<tr>
160 <td rowspan="3">AdditionLayer
161 <td rowspan="3" style="width:200px;"> Layer to add 2 tensors.
162 <td rowspan="3">
163 <ul>
164 <li>ANEURALNETWORKS_ADD
165 </ul>
166 <td>CpuRef
167 <td>
168 <ul>
169 <li>All
170 </ul>
171 <td>
172 <table>
173 <tr><th>
174 <tr><td>BFLOAT16
175 <tr><td>FLOAT16
176 <tr><td>FLOAT32
177 <tr><td>QASYMMS8
178 <tr><td>QASYMMU8
179 <tr><td>QSYMMS16
180 <tr><td>SIGNED32
181 </table>
182<tr>
183 <td>CpuAcc
184 <td>
185 <ul>
186 <li>All
187 </ul>
188 <td>
189 <table>
190 <tr><th>
191 <tr><td>QASYMMU8
192 <tr><td>QASYMMS8
193 <tr><td>QSYMMS16
194 <tr><td>SIGNED32
195 <tr><td>FLOAT16
196 <tr><td>FLOAT32
197 </table>
198<tr>
199 <td>GpuAcc
200 <td>
201 <ul>
202 <li>All
203 </ul>
204 <td>
205 <table>
206 <tr><th>
207 <tr><td>QASYMMU8
208 <tr><td>QASYMMS8
209 <tr><td>QSYMMS16
210 <tr><td>SIGNED32
211 <tr><td>FLOAT16
212 <tr><td>FLOAT32
213 </table>
214<tr>
215 <td rowspan="3">ArgMinMaxLayer
216 <td rowspan="3" style="width:200px;"> Layer to calculate the index of the minimum or maximum values in a tensor
217 based on an axis.
218 <td rowspan="3">
219 <ul>
220 <li>ANEURALNETWORKS_ARGMAX
221 <li>ANEURALNETWORKS_ARGMIN
222 </ul>
223 <td>CpuRef
224 <td>
225 <ul>
226 <li>All
227 </ul>
228 <td>
229 <table>
230 <tr><th>
231 <tr><td>BFLOAT16
232 <tr><td>FLOAT16
233 <tr><td>FLOAT32
234 <tr><td>QASYMMS8
235 <tr><td>QASYMMU8
236 <tr><td>QSYMMS16
237 <tr><td>SIGNED32
238 <tr><td>SIGNED64
239 </table>
240<tr>
241 <td>CpuAcc
242 <td>
243 <ul>
244 <li>All
245 </ul>
246 <td>
247 <table>
248 <tr><th>
249 <tr><td>QASYMMU8
250 <tr><td>QASYMMS8
251 <tr><td>SIGNED32
252 <tr><td>FLOAT16
253 <tr><td>FLOAT32
254 </table>
255<tr>
256 <td>GpuAcc
257 <td>
258 <ul>
259 <li>All
260 </ul>
261 <td>
262 <table>
263 <tr><th>
264 <tr><td>QASYMMU8
265 <tr><td>QASYMMS8
266 <tr><td>SIGNED32
267 <tr><td>FLOAT16
268 <tr><td>FLOAT32
269 </table>
270<tr>
Samuel Yap6b478092022-07-06 15:36:03 +0100271 <td rowspan="3">BatchMatMulLayer
272 <td rowspan="3" style="width:200px;"> Layer to perform batch matrix multiplication.
273 <td rowspan="3">
274 <ul>
275 <li>N/A
276 </ul>
277 <td>CpuRef
278 <td>
279 <ul>
280 <li>All
281 </ul>
282 <td>
283 <table>
284 <tr><th>
285 <tr><td>BFLOAT16
286 <tr><td>FLOAT16
287 <tr><td>FLOAT32
288 <tr><td>QASYMMS8
289 <tr><td>QASYMMU8
290 <tr><td>QSYMMS16
291 </table>
292<tr>
293 <td>CpuAcc
294 <td>
295 <ul>
Teresa Charlin0f86ecf2022-10-13 15:47:08 +0100296 <li>All
Samuel Yap6b478092022-07-06 15:36:03 +0100297 </ul>
298 <td>
Teresa Charlin0f86ecf2022-10-13 15:47:08 +0100299 <table>
300 <tr><th>
301 <tr><td>FLOAT32
Teresa Charlin1fe6c812022-11-01 15:59:50 +0000302 <tr><td>QASYMMS8
Teresa Charlin0f86ecf2022-10-13 15:47:08 +0100303 </table>
Samuel Yap6b478092022-07-06 15:36:03 +0100304<tr>
305 <td>GpuAcc
306 <td>
307 <ul>
Teresa Charlin94916a52022-10-19 08:48:07 +0100308 <li>All
Samuel Yap6b478092022-07-06 15:36:03 +0100309 </ul>
310 <td>
Teresa Charlin94916a52022-10-19 08:48:07 +0100311 <table>
312 <tr><th>
313 <tr><td>FLOAT32
Teresa Charlin97a3aef2023-01-10 10:32:51 +0000314 <tr><td>QASYMMS8
Teresa Charlin94916a52022-10-19 08:48:07 +0100315 </table>
Samuel Yap6b478092022-07-06 15:36:03 +0100316<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +0100317 <td rowspan="3">BatchNormalizationLayer
318 <td rowspan="3" style="width:200px;"> Layer to perform batch normalization.
319 <td rowspan="3">
320 <ul>
321 <li>N/A
322 </ul>
323 <td>CpuRef
324 <td>
325 <ul>
326 <li>All
327 </ul>
328 <td>
329 <table>
330 <tr><th>
331 <tr><td>BFLOAT16
332 <tr><td>FLOAT16
333 <tr><td>FLOAT32
334 <tr><td>QASYMMS8
335 <tr><td>QASYMMU8
336 <tr><td>QSYMMS16
337 </table>
338<tr>
339 <td>CpuAcc
340 <td>
341 <ul>
342 <li>NHWC
343 <li>NCHW
344 </ul>
345 <td>
346 <table>
347 <tr><th>
348 <tr><td>FLOAT32
349 <tr><td>FLOAT16
350 </table>
351<tr>
352 <td>GpuAcc
353 <td>
354 <ul>
355 <li>NHWC
356 <li>NCHW
357 </ul>
358 <td>
359 <table>
360 <tr><th>
361 <tr><td>FLOAT32
362 <tr><td>FLOAT16
363 </table>
364<tr>
365 <td rowspan="3">BatchToSpaceNdLayer
366 <td rowspan="3" style="width:200px;"> Layer to perform a batch to space transformation.
367 <td rowspan="3">
368 <ul>
369 <li>ANEURALNETWORKS_BATCH_TO_SPACE_ND
370 </ul>
371 <td>CpuRef
372 <td>
373 <ul>
374 <li>All
375 </ul>
376 <td>
377 <table>
378 <tr><th>
379 <tr><td>BFLOAT16
380 <tr><td>FLOAT16
381 <tr><td>FLOAT32
382 <tr><td>QASYMMS8
383 <tr><td>QASYMMU8
384 <tr><td>QSYMMS16
385 </table>
386<tr>
387 <td>CpuAcc
388 <td>
389 <ul>
390 <li>NHWC
391 <li>NCHW
392 </ul>
393 <td>
394 <table>
395 <tr><th>
396 <tr><td>All
397 </table>
398<tr>
399 <td>GpuAcc
400 <td>
401 <ul>
402 <li>NHWC
403 <li>NCHW
404 </ul>
405 <td>
406 <table>
407 <tr><th>
408 <tr><td>All
409 </table>
410<tr>
411 <td rowspan="3">CastLayer
412 <td rowspan="3" style="width:200px;"> Layer to cast a tensor to a type.
413 <td rowspan="3">
414 <ul>
415 <li>ANEURALNETWORKS_CAST
416 </ul>
417 <td>CpuRef
418 <td>
419 <ul>
420 <li>All
421 </ul>
422 <td>
423 <table>
424 <tr><th>
425 <tr><td>BFLOAT16
426 <tr><td>FLOAT16
427 <tr><td>FLOAT32
428 <tr><td>QSYMMS8
429 <tr><td>QASYMMS8
430 <tr><td>QASYMMU8
431 <tr><td>QSYMMS16
432 <tr><td>SIGNED32
433 </table>
434<tr>
435 <td>CpuAcc
436 <td>
437 <ul>
438 <li>All
439 </ul>
440 <td>
441 <table>
442 <tr><th>
443 <tr><td>QASYMMS8
444 <tr><td>QASYMMU8
445 <tr><td>FLOAT16
446 <tr><td>SIGNED32
447 <tr><td>FLOAT32
448 </table>
449<tr>
450 <td>GpuAcc
451 <td>
452 <ul>
453 <li>All
454 </ul>
455 <td>
456 <table>
457 <tr><th>
458 <tr><td>QASYMMS8
459 <tr><td>QASYMMU8
460 <tr><td>SIGNED32
461 <tr><td>FLOAT16
462 <tr><td>FLOAT32
463 </table>
464<tr>
Teresa Charlincd203852021-09-24 18:15:39 +0100465 <td rowspan="3">ChannelShuffleLayer
466 <td rowspan="3" style="width:200px;"> Layer to reorganize the channels of a tensor.
467 <td rowspan="3">
468 <ul>
469 <li>ANEURALNETWORKS_CHANNEL_SHUFFLE
470 </ul>
471 <td>CpuRef
472 <td>
473 <ul>
474 <li>All
475 </ul>
476 <td>
477 <table>
478 <tr><th>
479 <tr><td>FLOAT16
480 <tr><td>FLOAT32
481 <tr><td>QSYMMS8
482 <tr><td>QASYMMS8
483 <tr><td>QASYMMU8
484 </table>
485<tr>
486 <td>CpuAcc
487 <td>
488 <ul>
489 <li>All
490 </ul>
491 <td>
492 <table>
493 <tr><th>
494 <tr><td>QASYMMS8
495 <tr><td>QASYMMU8
496 <tr><td>FLOAT16
497 <tr><td>FLOAT32
498 </table>
499<tr>
500 <td>GpuAcc
501 <td>
502 <ul>
503 <li>All
504 </ul>
505 <td>
506 <table>
507 <tr><th>
508 <tr><td>QASYMMS8
509 <tr><td>QASYMMU8
510 <tr><td>FLOAT16
511 <tr><td>FLOAT32
512 </table>
513<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +0100514 <td rowspan="3">ComparisonLayer
515 <td rowspan="3" style="width:200px;"> Layer to compare 2 tensors.
516 <td rowspan="3">
517 <ul>
518 <li>ANEURALNETWORKS_EQUAL
519 <li>ANEURALNETWORKS_GREATER
520 <li>ANEURALNETWORKS_GREATER_EQUAL
521 <li>ANEURALNETWORKS_LESS
522 <li>ANEURALNETWORKS_LESS_EQUAL
523 <li>ANEURALNETWORKS_NOT_EQUAL
524 </ul>
525 <td>CpuRef
526 <td>
527 <ul>
528 <li>All
529 </ul>
530 <td>
531 <table>
532 <tr><th>
533 <tr><td>BFLOAT16
534 <tr><td>FLOAT16
535 <tr><td>FLOAT32
536 <tr><td>BOOLEAN
537 <tr><td>QASYMMS8
538 <tr><td>QASYMMU8
539 <tr><td>QSYMMS16
540 <tr><td>SIGNED32
541 </table>
542<tr>
543 <td>CpuAcc
544 <td>
545 <ul>
546 <li>All
547 </ul>
548 <td>
549 <table>
550 <tr><th>
551 <tr><td>All
552 </table>
553<tr>
554 <td>GpuAcc
555 <td>
556 <ul>
557 <li>All
558 </ul>
559 <td>
560 <table>
561 <tr><th>
562 <tr><td>All
563 </table>
564<tr>
565 <td rowspan="3">ConcatLayer
566 <td rowspan="3" style="width:200px;"> Layer to concatenate tensors along a given axis.
567 <td rowspan="3">
568 <ul>
569 <li>ANEURALNETWORKS_CONCATENATION
570 </ul>
571 <td>CpuRef
572 <td>
573 <ul>
574 <li>All
575 </ul>
576 <td>
577 <table>
578 <tr><th>
579 <tr><td>BFLOAT16
580 <tr><td>FLOAT16
581 <tr><td>FLOAT32
582 <tr><td>QASYMMS8
583 <tr><td>QASYMMU8
584 <tr><td>QSYMMS16
585 </table>
586<tr>
587 <td>CpuAcc
588 <td>
589 <ul>
590 <li>All
591 </ul>
592 <td>
593 <table>
594 <tr><th>
595 <tr><td>QASYMMU8
596 <tr><td>QASYMMS8
597 <tr><td>FLOAT16
598 <tr><td>FLOAT32
599 </table>
600<tr>
601 <td>GpuAcc
602 <td>
603 <ul>
604 <li>All
605 </ul>
606 <td>
607 <table>
608 <tr><th>
609 <tr><td>QASYMMU8
610 <tr><td>QASYMMS8
611 <tr><td>FLOAT16
612 <tr><td>FLOAT32
613 </table>
614<tr>
615 <td rowspan="3">ConstantLayer
616 <td rowspan="3" style="width:200px;"> Layer to provide a constant tensor.
617 <td rowspan="3">
618 <ul>
619 <li>N/A
620 </ul>
621 <td>CpuRef
622 <td>
623 <ul>
624 <li>All
625 </ul>
626 <td>
627 <table>
628 <tr><th>
629 <tr><td>BFLOAT16
630 <tr><td>FLOAT16
631 <tr><td>FLOAT32
632 <tr><td>QASYMMS8
633 <tr><td>QASYMMU8
634 <tr><td>QSYMMS8
635 <tr><td>QSYMMS16
636 <tr><td>SIGNED32
637 </table>
638<tr>
639 <td>CpuAcc
640 <td>
641 <ul>
642 <li>All
643 </ul>
644 <td>
645 <table>
646 <tr><th>
647 <tr><td>All
648 </table>
649<tr>
650 <td>GpuAcc
651 <td>
652 <ul>
653 <li>All
654 </ul>
655 <td>
656 <table>
657 <tr><th>
658 <tr><td>All
659 </table>
660<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +0100661 <td rowspan="3">ConvertFp16ToFp32Layer
662 <td rowspan="3" style="width:200px;"> Layer to convert Float16 tensor to Float32 tensor.
663 <td rowspan="3">
664 <ul>
665 <li>N/A
666 </ul>
667 <td>CpuRef
668 <td>
669 <ul>
670 <li>All
671 </ul>
672 <td>
673 <table>
674 <tr><th>
675 <tr><td>FLOAT16
676 <tr><td>FLOAT32
677 </table>
678<tr>
679 <td>CpuAcc
680 <td>
681 <ul>
682 <li>All
683 </ul>
684 <td>
685 <table>
686 <tr><th>
687 <tr><td>FLOAT16
688 <tr><td>FLOAT32
689 </table>
690<tr>
691 <td>GpuAcc
692 <td>
693 <ul>
694 <li>All
695 </ul>
696 <td>
697 <table>
698 <tr><th>
699 <tr><td>FLOAT16
700 <tr><td>FLOAT32
701 </table>
702<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +0100703 <td rowspan="3">ConvertFp32ToFp16Layer
704 <td rowspan="3" style="width:200px;"> Layer to convert Float32 tensor to Float16 tensor.
705 <td rowspan="3">
706 <ul>
707 <li>N/A
708 </ul>
709 <td>CpuRef
710 <td>
711 <ul>
712 <li>All
713 </ul>
714 <td>
715 <table>
716 <tr><th>
717 <tr><td>FLOAT16
718 <tr><td>FLOAT32
719 </table>
720<tr>
721 <td>CpuAcc
722 <td>
723 <ul>
724 <li>All
725 </ul>
726 <td>
727 <table>
728 <tr><th>
729 <tr><td>FLOAT16
730 <tr><td>FLOAT32
731 </table>
732<tr>
733 <td>GpuAcc
734 <td>
735 <ul>
736 <li>All
737 </ul>
738 <td>
739 <table>
740 <tr><th>
741 <tr><td>FLOAT16
742 <tr><td>FLOAT32
743 </table>
744<tr>
745 <td rowspan="3">Convolution2dLayer
746 <td rowspan="3" style="width:200px;"> Layer to compute a convolution operation.
747 <td rowspan="3">
748 <ul>
749 <li>ANEURALNETWORKS_CONV_2D
750 <li>ANEURALNETWORKS_GROUPED_CONV_2D
751 </ul>
752 <td>CpuRef
753 <td>
754 <ul>
755 <li>All
756 </ul>
757 <td>
758 <table>
759 <tr><th>
760 <tr><td>BFLOAT16
761 <tr><td>FLOAT16
762 <tr><td>FLOAT32
763 <tr><td>QASYMMS8
764 <tr><td>QASYMMU8
765 <tr><td>QSYMMS16
766 </table>
767<tr>
768 <td>CpuAcc
769 <td>
770 <ul>
771 <li>NHWC
772 <li>NCHW
773 </ul>
774 <td>
775 <table>
776 <tr><th>
777 <tr><td>SIGNED32
778 <tr><td>FLOAT16
779 <tr><td>FLOAT32
780 <tr><td>QASYMMU8
781 <tr><td>QASYMMS8
782 <tr><td>QUANTIZEDSYMM8PERAXIS
783 </table>
784<tr>
785 <td>GpuAcc
786 <td>
787 <ul>
788 <li>NHWC
789 <li>NCHW
790 </ul>
791 <td>
792 <table>
793 <tr><th>
794 <tr><td>SIGNED32
795 <tr><td>FLOAT16
796 <tr><td>FLOAT32
797 <tr><td>QASYMMU8
798 <tr><td>QASYMMS8
799 <tr><td>QUANTIZEDSYMM8PERAXIS
800 </table>
801<tr>
Matthew Sloyanb63a3112021-09-08 13:05:51 +0100802 <td rowspan="3">Convolution3dLayer
803 <td rowspan="3" style="width:200px;"> Layer to compute a 3D convolution operation.
804 <td rowspan="3">
805 <ul>
806 <li>N/A
807 </ul>
808 <td>CpuRef
809 <td>
810 <ul>
811 <li>NDHWC
812 </ul>
813 <td>
814 <table>
815 <tr><th>
816 <tr><td>BFLOAT16
817 <tr><td>FLOAT16
818 <tr><td>FLOAT32
819 <tr><td>QASYMMS8
820 <tr><td>QASYMMU8
821 <tr><td>QSYMMS8
822 <tr><td>QSYMMS16
823 </table>
824<tr>
825 <td>CpuAcc
826 <td>
827 <ul>
828 <li>N/A
829 </ul>
830 <td>
831 <ul>
832 <li>N/A
833 </ul>
834<tr>
835 <td>GpuAcc
836 <td>
837 <ul>
838 <li>N/A
839 </ul>
840 <td>
841 <ul>
842 <li>N/A
843 </ul>
844<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +0100845 <td rowspan="1">DebugLayer
846 <td rowspan="1" style="width:200px;"> Layer to print out inter layer tensor information.
847 <td rowspan="1">
848 <ul>
849 <li>N/A
850 </ul>
851 <td>CpuRef
852 <td>
853 <ul>
854 <li>All
855 </ul>
856 <td>
857 <table>
858 <tr><th>
859 <tr><td>BFLOAT16
860 <tr><td>FLOAT16
861 <tr><td>FLOAT32
862 <tr><td>QASYMMS8
863 <tr><td>QASYMMU8
864 <tr><td>QSYMMS8
865 <tr><td>QSYMMS16
866 <tr><td>SIGNED32
867 </table>
868<tr>
869 <td rowspan="3">DepthToSpaceLayer
870 <td rowspan="3" style="width:200px;"> Layer to perform Depth to Space transformation.
871 <td rowspan="3">
872 <ul>
873 <li>ANEURALNETWORKS_DEPTH_TO_SPACE
874 </ul>
875 <td>CpuRef
876 <td>
877 <ul>
878 <li>All
879 </ul>
880 <td>
881 <table>
882 <tr><th>
883 <tr><td>BFLOAT16
884 <tr><td>FLOAT16
885 <tr><td>FLOAT32
886 <tr><td>QASYMMS8
887 <tr><td>QASYMMU8
888 <tr><td>QSYMMS16
889 </table>
890<tr>
891 <td>CpuAcc
892 <td>
893 <ul>
894 <li>NHWC
895 <li>NCHW
896 </ul>
897 <td>
898 <table>
899 <tr><th>
900 <tr><td>All
901 </table>
902<tr>
903 <td>GpuAcc
904 <td>
905 <ul>
906 <li>NHWC
907 <li>NCHW
908 </ul>
909 <td>
910 <table>
911 <tr><th>
912 <tr><td>All
913 </table>
914<tr>
915 <td rowspan="3">DepthwiseConvolution2dLayer
916 <td rowspan="3" style="width:200px;"> Layer to compute a depthwise convolution operation.
917 <td rowspan="3">
918 <ul>
919 <li>ANEURALNETWORKS_DEPTHWISE_CONV_2D
920 </ul>
921 <td>CpuRef
922 <td>
923 <ul>
924 <li>All
925 </ul>
926 <td>
927 <table>
928 <tr><th>
929 <tr><td>BFLOAT16
930 <tr><td>FLOAT16
931 <tr><td>FLOAT32
932 <tr><td>QASYMMS8
933 <tr><td>QASYMMU8
934 <tr><td>QSYMMS8
935 <tr><td>QSYMMS16
936 </table>
937<tr>
938 <td>CpuAcc
939 <td>
940 <ul>
941 <li>NHWC
942 <li>NCHW
943 </ul>
944 <td>
945 <table>
946 <tr><th>
947 <tr><td>FLOAT16
948 <tr><td>FLOAT32
949 <tr><td>SIGNED32
950 <tr><td>QASYMMU8
951 <tr><td>QASYMMS8
952 <tr><td>QUANTIZEDSYMM8PERAXIS
953 </table>
954<tr>
955 <td>GpuAcc
956 <td>
957 <ul>
958 <li>NHWC
959 <li>NCHW
960 </ul>
961 <td>
962 <table>
963 <tr><th>
964 <tr><td>FLOAT16
965 <tr><td>FLOAT32
966 <tr><td>SIGNED32
967 <tr><td>QASYMMU8
968 <tr><td>QASYMMS8
969 <tr><td>QUANTIZEDSYMM8PERAXIS
970 </table>
971<tr>
972 <td rowspan="3">DequantizeLayer
973 <td rowspan="3" style="width:200px;"> Layer to dequantize the values in a tensor.
974 <td rowspan="3">
975 <ul>
976 <li>ANEURALNETWORKS_DEQUANTIZE
977 </ul>
978 <td>CpuRef
979 <td>
980 <ul>
981 <li>All
982 </ul>
983 <td>
984 <table>
985 <tr><th>
986 <tr><td>QASYMMS8
987 <tr><td>QASYMMU8
988 <tr><td>QSYMMS8
989 <tr><td>QSYMMS16
990 </table>
991<tr>
992 <td>CpuAcc
993 <td>
994 <ul>
995 <li>All
996 </ul>
997 <td>
998 <table>
999 <tr><th>
1000 <tr><td>FLOAT16
1001 <tr><td>FLOAT32
1002 <tr><td>QASYMMU8
1003 <tr><td>QASYMMS8
1004 <tr><td>QUANTIZEDSYMM8PERAXIS
1005 <tr><td>QSYMMS8
1006 <tr><td>QSYMMS16
1007 </table>
1008<tr>
1009 <td>GpuAcc
1010 <td>
1011 <ul>
1012 <li>All
1013 </ul>
1014 <td>
1015 <table>
1016 <tr><th>
1017 <tr><td>FLOAT16
1018 <tr><td>FLOAT32
1019 <tr><td>QASYMMU8
1020 <tr><td>QASYMMS8
1021 <tr><td>QUANTIZEDSYMM8PERAXIS
1022 <tr><td>QSYMMS8
1023 <tr><td>QSYMMS16
1024 </table>
1025<tr>
1026 <td rowspan="2">DetectionPostProcessLayer
1027 <td rowspan="2" style="width:200px;"> Layer to generate the detection output based on center size encoded boxes, class prediction and anchors by doing non maximum suppression (NMS).
1028 <td rowspan="2">
1029 <ul>
1030 <li>ANEURALNETWORKS_DETECTION_POSTPROCESSING
1031 </ul>
1032 <td>CpuRef
1033 <td>
1034 <ul>
1035 <li>All
1036 </ul>
1037 <td>
1038 <table>
1039 <tr><th>
1040 <tr><td>BFLOAT16
1041 <tr><td>FLOAT16
1042 <tr><td>FLOAT32
1043 <tr><td>QASYMMS8
1044 <tr><td>QASYMMU8
1045 <tr><td>QSYMMS16
1046 </table>
1047<tr>
1048 <td>CpuAcc
1049 <td>
1050 <ul>
1051 <li>All
1052 </ul>
1053 <td>
1054 <table>
1055 <tr><th>
1056 <tr><td>QASYMMU8
1057 <tr><td>QASYMMS8
1058 <tr><td>FLOAT32
1059 </table>
1060<tr>
1061 <td rowspan="3">DivisionLayer
1062 <td rowspan="3" style="width:200px;"> Layer to divide 2 tensors.
1063 <td rowspan="3">
1064 <ul>
1065 <li>ANEURALNETWORKS_DIV
1066 </ul>
1067 <td>CpuRef
1068 <td>
1069 <ul>
1070 <li>All
1071 </ul>
1072 <td>
1073 <table>
1074 <tr><th>
1075 <tr><td>BFLOAT16
1076 <tr><td>FLOAT16
1077 <tr><td>FLOAT32
1078 <tr><td>QASYMMS8
1079 <tr><td>QASYMMU8
1080 <tr><td>QSYMMS16
1081 <tr><td>SIGNED32
1082 </table>
1083<tr>
1084 <td>CpuAcc
1085 <td>
1086 <ul>
1087 <li>All
1088 </ul>
1089 <td>
1090 <table>
1091 <tr><th>
1092 <tr><td>FLOAT16
1093 <tr><td>FLOAT32
1094 </table>
1095<tr>
1096 <td>GpuAcc
1097 <td>
1098 <ul>
1099 <li>All
1100 </ul>
1101 <td>
1102 <table>
1103 <tr><th>
1104 <tr><td>FLOAT16
1105 <tr><td>FLOAT32
1106 </table>
1107<tr>
1108 <td rowspan="3">ElementwiseBaseLayer
1109 <td rowspan="3" style="width:200px;"> Layer to perform Add - Div - Max - Min - Mul operations.
1110 <td rowspan="3">
1111 <ul>
1112 <li>ANEURALNETWORKS_ADD
1113 <li>ANEURALNETWORKS_DIV
1114 <li>ANEURALNETWORKS_MAXIMUM
1115 <li>ANEURALNETWORKS_MINIMUM
1116 <li>ANEURALNETWORKS_MUL
1117 </ul>
1118 <td>CpuRef
1119 <td>
1120 <ul>
1121 <li>All
1122 </ul>
1123 <td>
1124 <table>
1125 <tr><th>
1126 <tr><td>BFLOAT16
1127 <tr><td>FLOAT16
1128 <tr><td>FLOAT32
1129 <tr><td>QASYMMS8
1130 <tr><td>QASYMMU8
1131 <tr><td>QSYMMS16
1132 <tr><td>SIGNED32
1133 </table>
1134<tr>
1135 <td>CpuAcc
1136 <td>
1137 <ul>
1138 <li>All
1139 </ul>
1140 <td>
1141 <table>
1142 <tr><th>
1143 <tr><td>QASYMMU8
1144 <tr><td>QASYMMS8
1145 <tr><td>QSYMMS16
1146 <tr><td>SIGNED32
1147 <tr><td>FLOAT16
1148 <tr><td>FLOAT32
1149 </table>
1150<tr>
1151 <td>GpuAcc
1152 <td>
1153 <ul>
1154 <li>All
1155 </ul>
1156 <td>
1157 <table>
1158 <tr><th>
1159 <tr><td>QASYMMU8
1160 <tr><td>QASYMMS8
1161 <tr><td>QSYMMS16
1162 <tr><td>SIGNED32
1163 <tr><td>FLOAT16
1164 <tr><td>FLOAT32
1165 </table>
1166<tr>
John Mcloughlin0ec00872023-05-15 17:03:49 +01001167 <td rowspan="3">ElementwiseBinaryLayer
1168 <td rowspan="3" style="width:200px;"> Layer to perform Power and Square Difference operations.
1169 <td rowspan="3">
1170 <ul>
1171 <li>ANEURALNETWORKS_POW
1172 </ul>
1173 <td>CpuRef
1174 <td>
1175 <ul>
1176 <li>All
1177 </ul>
1178 <td>
1179 <table>
1180 <tr><th>
1181 <tr><td>FLOAT16
1182 <tr><td>FLOAT32
1183 <tr><td>QASYMMS8
1184 <tr><td>QASYMMU8
1185 <tr><td>QSYMMS16
1186 <tr><td>SIGNED32
1187 </table>
1188<tr>
1189 <td>CpuAcc
1190 <td>
1191 <ul>
1192 <li>All
1193 </ul>
1194 <td>
1195 <table>
1196 <tr><th>
1197 <tr><td>FLOAT16
1198 <tr><td>FLOAT32
1199 </table>
1200<tr>
1201 <td>GpuAcc
1202 <td>
1203 <ul>
1204 <li>All
1205 </ul>
1206 <td>
1207 <table>
1208 <tr><th>
1209 <tr><td>FLOAT16
1210 <tr><td>FLOAT32
1211 </table>
1212<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01001213 <td rowspan="3">ElementwiseUnaryLayer
1214 <td rowspan="3" style="width:200px;"> Layer to perform Rsqrt - Exp - Neg - Log - Abs - Sin - Sqrt operations.
1215 <td rowspan="3">
1216 <ul>
1217 <li>ANEURALNETWORKS_ABS
1218 <li>ANEURALNETWORKS_EXP
1219 <li>ANEURALNETWORKS_LOG
1220 <li>ANEURALNETWORKS_NEG
1221 <li>ANEURALNETWORKS_RSQRT
1222 <li>ANEURALNETWORKS_SIN
1223 <li>ANEURALNETWORKS_SQRT
1224 </ul>
1225 <td>CpuRef
1226 <td>
1227 <ul>
1228 <li>All
1229 </ul>
1230 <td>
1231 <table>
1232 <tr><th>
1233 <tr><td>BFLOAT16
1234 <tr><td>FLOAT16
1235 <tr><td>FLOAT32
1236 <tr><td>QASYMMS8
1237 <tr><td>QASYMMU8
1238 <tr><td>QSYMMS16
1239 </table>
1240<tr>
1241 <td>CpuAcc
1242 <td>
1243 <ul>
1244 <li>All
1245 </ul>
1246 <td>
1247 <table>
1248 <tr><th>
1249 <tr><td>FLOAT16
1250 <tr><td>FLOAT32
1251 <tr><td>SIGNED32
1252 </table>
1253<tr>
1254 <td>GpuAcc
1255 <td>
1256 <ul>
1257 <li>All
1258 </ul>
1259 <td>
1260 <table>
1261 <tr><th>
1262 <tr><td>FLOAT16
1263 <tr><td>FLOAT32
1264 </table>
1265<tr>
1266 <td rowspan="1">FakeQuantizationLayer
1267 <td rowspan="1" style="width:200px;"> Layer to quantize float values and dequantize afterwards. The current implementation does not dequantize the values.
1268 <td rowspan="1">
1269 <ul>
1270 <li>N/A
1271 </ul>
1272 <td>CpuRef
1273 <td>
1274 <ul>
1275 <li>All
1276 </ul>
1277 <td>
1278 <table>
1279 <tr><th>
1280 <tr><td>FLOAT32
1281 </table>
1282<tr>
1283 <td rowspan="3">FillLayer
1284 <td rowspan="3" style="width:200px;"> Layer to set the values of a tensor with a given value.
1285 <td rowspan="3">
1286 <ul>
1287 <li>ANEURALNETWORKS_FILL
1288 </ul>
1289 <td>CpuRef
1290 <td>
1291 <ul>
1292 <li>All
1293 </ul>
1294 <td>
1295 <table>
1296 <tr><th>
1297 <tr><td>FLOAT16
1298 <tr><td>FLOAT32
1299 <tr><td>SIGNED32
1300 </table>
1301<tr>
1302 <td>CpuAcc
1303 <td>
1304 <ul>
1305 <li>All
1306 </ul>
1307 <td>
1308 <table>
1309 <tr><th>
1310 <tr><td>All
1311 </table>
1312<tr>
1313 <td>GpuAcc
1314 <td>
1315 <ul>
1316 <li>All
1317 </ul>
1318 <td>
1319 <table>
1320 <tr><th>
1321 <tr><td>All
1322 </table>
1323<tr>
1324 <td rowspan="3">FloorLayer
1325 <td rowspan="3" style="width:200px;"> Layer to round the value to the lowest whole number.
1326 <td rowspan="3">
1327 <ul>
1328 <li>ANEURALNETWORKS_FLOOR
1329 </ul>
1330 <td>CpuRef
1331 <td>
1332 <ul>
1333 <li>All
1334 </ul>
1335 <td>
1336 <table>
1337 <tr><th>
1338 <tr><td>BFLOAT16
1339 <tr><td>FLOAT16
1340 <tr><td>FLOAT32
1341 </table>
1342<tr>
1343 <td>CpuAcc
1344 <td>
1345 <ul>
1346 <li>All
1347 </ul>
1348 <td>
1349 <table>
1350 <tr><th>
1351 <tr><td>FLOAT32
1352 <tr><td>FLOAT16
1353 </table>
1354<tr>
1355 <td>GpuAcc
1356 <td>
1357 <ul>
1358 <li>All
1359 </ul>
1360 <td>
1361 <table>
1362 <tr><th>
1363 <tr><td>FLOAT32
1364 <tr><td>FLOAT16
1365 </table>
1366<tr>
1367 <td rowspan="3">FullyConnectedLayer
1368 <td rowspan="3" style="width:200px;"> Layer to perform a fully connected / dense operation.
1369 <td rowspan="3">
1370 <ul>
1371 <li>ANEURALNETWORKS_FULLY_CONNECTED
1372 </ul>
1373 <td>CpuRef
1374 <td>
1375 <ul>
1376 <li>All
1377 </ul>
1378 <td>
1379 <table>
1380 <tr><th>
1381 <tr><td>BFLOAT16
1382 <tr><td>FLOAT16
1383 <tr><td>FLOAT32
1384 <tr><td>QASYMMS8
1385 <tr><td>QASYMMU8
1386 <tr><td>QSYMMS16
1387 </table>
1388<tr>
1389 <td>CpuAcc
1390 <td>
1391 <ul>
1392 <li>NHWC
1393 <li>NCHW
1394 </ul>
1395 <td>
1396 <table>
1397 <tr><th>
1398 <tr><td>SIGNED32
1399 <tr><td>FLOAT16
1400 <tr><td>FLOAT32
1401 <tr><td>QASYMMU8
1402 <tr><td>QASYMMS8
1403 </table>
1404<tr>
1405 <td>GpuAcc
1406 <td>
1407 <ul>
1408 <li>NHWC
1409 <li>NCHW
1410 </ul>
1411 <td>
1412 <table>
1413 <tr><th>
1414 <tr><td>SIGNED32
1415 <tr><td>FLOAT16
1416 <tr><td>FLOAT32
1417 <tr><td>QASYMMU8
1418 <tr><td>QASYMMS8
1419 </table>
1420<tr>
1421 <td rowspan="3">GatherLayer
1422 <td rowspan="3" style="width:200px;"> Layer to perform the gather operation along the chosen axis.
1423 <td rowspan="3">
1424 <ul>
1425 <li>ANEURALNETWORKS_GATHER
1426 </ul>
1427 <td>CpuRef
1428 <td>
1429 <ul>
1430 <li>All
1431 </ul>
1432 <td>
1433 <table>
1434 <tr><th>
1435 <tr><td>BFLOAT16
1436 <tr><td>FLOAT16
1437 <tr><td>FLOAT32
1438 <tr><td>QASYMMS8
1439 <tr><td>QASYMMU8
1440 <tr><td>QSYMMS16
1441 <tr><td>SIGNED32
1442 </table>
1443<tr>
1444 <td>CpuAcc
1445 <td>
1446 <ul>
1447 <li>All
1448 </ul>
1449 <td>
1450 <table>
1451 <tr><th>
1452 <tr><td>All
1453 </table>
1454<tr>
1455 <td>GpuAcc
1456 <td>
1457 <ul>
1458 <li>All
1459 </ul>
1460 <td>
1461 <table>
1462 <tr><th>
1463 <tr><td>All
1464 </table>
1465<tr>
Teresa Charlinb2d3ec52022-04-12 22:07:09 +01001466 <td rowspan="3">GatherNdLayer
1467 <td rowspan="3" style="width:200px;"> Layer to perform the gatherNd operation.
1468 <td rowspan="3">
1469 <ul>
1470 <li>N/A
1471 </ul>
1472 <td>CpuRef
1473 <td>
1474 <ul>
1475 <li>All
1476 </ul>
1477 <td>
1478 <table>
1479 <tr><th>
1480 <tr><td>BFLOAT16
1481 <tr><td>FLOAT16
1482 <tr><td>FLOAT32
1483 <tr><td>QASYMMS8
1484 <tr><td>QASYMMU8
1485 <tr><td>QSYMMS16
1486 <tr><td>SIGNED32
1487 </table>
1488<tr>
1489 <td>CpuAcc
1490 <td>
1491 <ul>
Teresa Charlinbd22c7d2022-04-26 18:14:12 +01001492 <li>All
Teresa Charlinb2d3ec52022-04-12 22:07:09 +01001493 </ul>
1494 <td>
Teresa Charlinbd22c7d2022-04-26 18:14:12 +01001495 <table>
1496 <tr><th>
1497 <tr><td>BFLOAT16
1498 <tr><td>FLOAT16
1499 <tr><td>FLOAT32
1500 <tr><td>QASYMMS8
1501 <tr><td>QASYMMU8
1502 <tr><td>QSYMMS16
1503 <tr><td>SIGNED32
1504 </table>
Teresa Charlinb2d3ec52022-04-12 22:07:09 +01001505<tr>
1506 <td>GpuAcc
1507 <td>
1508 <ul>
Teresa Charlin989e2f62022-04-27 16:26:11 +01001509 <li>All
Teresa Charlinb2d3ec52022-04-12 22:07:09 +01001510 </ul>
1511 <td>
Teresa Charlin989e2f62022-04-27 16:26:11 +01001512 <table>
1513 <tr><th>
1514 <tr><td>BFLOAT16
1515 <tr><td>FLOAT16
1516 <tr><td>FLOAT32
1517 <tr><td>QASYMMS8
1518 <tr><td>QASYMMU8
1519 <tr><td>QSYMMS16
1520 <tr><td>SIGNED32
1521 </table>
Teresa Charlinb2d3ec52022-04-12 22:07:09 +01001522<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01001523 <td rowspan="1">InputLayer
1524 <td rowspan="1" style="width:200px;"> Special layer used to provide input data to the computational network.
1525 <td rowspan="1">
1526 <ul>
1527 <li>N/A
1528 </ul>
1529 <td>All
1530 <td>
1531 <ul>
1532 <li>All
1533 </ul>
1534 <td>
1535 <table>
1536 <tr><th>
1537 <tr><td>All
1538 </table>
1539<tr>
1540 <td rowspan="3">InstanceNormalizationLayer
1541 <td rowspan="3" style="width:200px;"> Layer to perform an instance normalization on a given axis.
1542 <td rowspan="3">
1543 <ul>
1544 <li>ANEURALNETWORKS_INSTANCE_NORMALIZATION
1545 </ul>
1546 <td>CpuRef
1547 <td>
1548 <ul>
1549 <li>All
1550 </ul>
1551 <td>
1552 <table>
1553 <tr><th>
1554 <tr><td>BFLOAT16
1555 <tr><td>FLOAT16
1556 <tr><td>FLOAT32
1557 </table>
1558<tr>
1559 <td>CpuAcc
1560 <td>
1561 <ul>
1562 <li>NHWC
1563 <li>NCHW
1564 </ul>
1565 <td>
1566 <table>
1567 <tr><th>
1568 <tr><td>FLOAT16
1569 <tr><td>FLOAT32
1570 </table>
1571<tr>
1572 <td>GpuAcc
1573 <td>
1574 <ul>
1575 <li>NHWC
1576 <li>NCHW
1577 </ul>
1578 <td>
1579 <table>
1580 <tr><th>
1581 <tr><td>FLOAT16
1582 <tr><td>FLOAT32
1583 </table>
1584<tr>
1585 <td rowspan="3">L2NormalizationLayer
1586 <td rowspan="3" style="width:200px;"> Layer to perform an L2 normalization on a given axis.
1587 <td rowspan="3">
1588 <ul>
1589 <li>ANEURALNETWORKS_L2_NORMALIZATION
1590 </ul>
1591 <td>CpuRef
1592 <td>
1593 <ul>
1594 <li>All
1595 </ul>
1596 <td>
1597 <table>
1598 <tr><th>
1599 <tr><td>BFLOAT16
1600 <tr><td>FLOAT16
1601 <tr><td>FLOAT32
1602 <tr><td>QASYMMS8
1603 <tr><td>QASYMMU8
1604 <tr><td>QSYMMS16
1605 </table>
1606<tr>
1607 <td>CpuAcc
1608 <td>
1609 <ul>
1610 <li>NHWC
1611 <li>NCHW
1612 </ul>
1613 <td>
1614 <table>
1615 <tr><th>
1616 <tr><td>FLOAT16
1617 <tr><td>FLOAT32
1618 </table>
1619<tr>
1620 <td>GpuAcc
1621 <td>
1622 <ul>
1623 <li>NHWC
1624 <li>NCHW
1625 </ul>
1626 <td>
1627 <table>
1628 <tr><th>
1629 <tr><td>FLOAT16
1630 <tr><td>FLOAT32
1631 </table>
1632<tr>
1633 <td rowspan="3">LogSoftmaxLayer
1634 <td rowspan="3" style="width:200px;"> Layer to perform the log softmax activations given logits.
1635 <td rowspan="3">
1636 <ul>
1637 <li>N/A
1638 </ul>
1639 <td>CpuRef
1640 <td>
1641 <ul>
1642 <li>All
1643 </ul>
1644 <td>
1645 <table>
1646 <tr><th>
1647 <tr><td>BFLOAT16
1648 <tr><td>FLOAT16
1649 <tr><td>FLOAT32
1650 </table>
1651<tr>
1652 <td>CpuAcc
1653 <td>
1654 <ul>
1655 <li>All
1656 </ul>
1657 <td>
1658 <table>
1659 <tr><th>
1660 <tr><td>QASYMMU8
1661 <tr><td>QASYMMS8
1662 <tr><td>FLOAT16
1663 <tr><td>FLOAT32
1664 </table>
1665<tr>
1666 <td>GpuAcc
1667 <td>
1668 <ul>
1669 <li>All
1670 </ul>
1671 <td>
1672 <table>
1673 <tr><th>
1674 <tr><td>QASYMMU8
1675 <tr><td>QASYMMS8
1676 <tr><td>FLOAT16
1677 <tr><td>FLOAT32
1678 </table>
1679<tr>
1680 <td rowspan="3">LogicalBinaryLayer
1681 <td rowspan="3" style="width:200px;"> Layer to perform Logical AND - Logical NOT - Logical OR operations.
1682 <td rowspan="3">
1683 <ul>
1684 <li>ANEURALNETWORKS_LOGICAL_AND
1685 <li>ANEURALNETWORKS_LOGICAL_NOT
1686 <li>ANEURALNETWORKS_LOGICAL_OR
1687 </ul>
1688 <td>CpuRef
1689 <td>
1690 <ul>
1691 <li>All
1692 </ul>
1693 <td>
1694 <table>
1695 <tr><th>
1696 <tr><td>BOOLEAN
1697 </table>
1698<tr>
1699 <td>CpuAcc
1700 <td>
1701 <ul>
1702 <li>All
1703 </ul>
1704 <td>
1705 <table>
1706 <tr><th>
1707 <tr><td>BOOLEAN
1708 </table>
1709<tr>
1710 <td>GpuAcc
1711 <td>
1712 <ul>
1713 <li>All
1714 </ul>
1715 <td>
1716 <table>
1717 <tr><th>
1718 <tr><td>BOOLEAN
1719 </table>
1720<tr>
1721 <td rowspan="3">LstmLayer
1722 <td rowspan="3" style="width:200px;"> Layer to perform a single time step in a Long Short-Term Memory (LSTM) operation.
1723 <td rowspan="3">
1724 <ul>
1725 <li>ANEURALNETWORKS_LSTM
1726 </ul>
1727 <td>CpuRef
1728 <td>
1729 <ul>
1730 <li>All
1731 </ul>
1732 <td>
1733 <table>
1734 <tr><th>
1735 <tr><td>BFLOAT16
1736 <tr><td>FLOAT16
1737 <tr><td>QSYMMS16
1738 </table>
1739<tr>
1740 <td>CpuAcc
1741 <td>
1742 <ul>
1743 <li>All
1744 </ul>
1745 <td>
1746 <table>
1747 <tr><th>
1748 <tr><td>FLOAT16
1749 <tr><td>FLOAT32
1750 </table>
1751<tr>
1752 <td>GpuAcc
1753 <td>
1754 <ul>
1755 <li>All
1756 </ul>
1757 <td>
1758 <table>
1759 <tr><th>
1760 <tr><td>FLOAT16
1761 <tr><td>FLOAT32
1762 </table>
1763<tr>
1764 <td rowspan="3">MapLayer
1765 <td rowspan="3" style="width:200px;"> Layer to perform map operation on tensor.
1766 <td rowspan="3">
1767 <ul>
1768 <li>N/A
1769 </ul>
1770 <td>CpuRef
1771 <td>
1772 <ul>
1773 <li>All
1774 </ul>
1775 <td>
1776 <table>
1777 <tr><th>
1778 <tr><td>All
1779 </table>
1780<tr>
1781 <td>CpuAcc
1782 <td>
1783 <ul>
1784 <li>All
1785 </ul>
1786 <td>
1787 <table>
1788 <tr><th>
1789 <tr><td>All
1790 </table>
1791<tr>
1792 <td>GpuAcc
1793 <td>
1794 <ul>
1795 <li>All
1796 </ul>
1797 <td>
1798 <table>
1799 <tr><th>
1800 <tr><td>All
1801 </table>
1802<tr>
1803 <td rowspan="3">MaximumLayer
1804 <td rowspan="3" style="width:200px;"> Layer to perform an elementwise maximum of two tensors.
1805 <td rowspan="3">
1806 <ul>
1807 <li>N/A
1808 </ul>
1809 <td>CpuRef
1810 <td>
1811 <ul>
1812 <li>All
1813 </ul>
1814 <td>
1815 <table>
1816 <tr><th>
1817 <tr><td>BFLOAT16
1818 <tr><td>FLOAT16
1819 <tr><td>FLOAT32
1820 <tr><td>QASYMMS8
1821 <tr><td>QASYMMU8
1822 <tr><td>QSYMMS16
1823 <tr><td>SIGNED32
1824 </table>
1825<tr>
1826 <td>CpuAcc
1827 <td>
1828 <ul>
1829 <li>All
1830 </ul>
1831 <td>
1832 <table>
1833 <tr><th>
1834 <tr><td>QASYMMU8
1835 <tr><td>QASYMMS8
1836 <tr><td>FLOAT16
1837 <tr><td>FLOAT32
1838 <tr><td>SIGNED32
1839 </table>
1840<tr>
1841 <td>GpuAcc
1842 <td>
1843 <ul>
1844 <li>All
1845 </ul>
1846 <td>
1847 <table>
1848 <tr><th>
1849 <tr><td>QASYMMU8
1850 <tr><td>QASYMMS8
1851 <tr><td>QSYMMS16
1852 <tr><td>FLOAT16
1853 <tr><td>FLOAT32
1854 <tr><td>SIGNED32
1855 </table>
1856<tr>
1857 <td rowspan="3">MeanLayer
1858 <td rowspan="3" style="width:200px;"> Layer to perform reduce mean operation.
1859 <td rowspan="3">
1860 <ul>
1861 <li>ANEURALNETWORKS_MEAN
1862 </ul>
1863 <td>CpuRef
1864 <td>
1865 <ul>
1866 <li>All
1867 </ul>
1868 <td>
1869 <table>
1870 <tr><th>
1871 <tr><td>BFLOAT16
1872 <tr><td>FLOAT16
1873 <tr><td>FLOAT32
1874 <tr><td>QASYMMS8
1875 <tr><td>QASYMMU8
1876 <tr><td>QSYMMS16
1877 </table>
1878<tr>
1879 <td>CpuAcc
1880 <td>
1881 <ul>
1882 <li>All
1883 </ul>
1884 <td>
1885 <table>
1886 <tr><th>
1887 <tr><td>QASYMMU8
1888 <tr><td>QASYMMS8
1889 <tr><td>FLOAT16
1890 <tr><td>FLOAT32
1891 </table>
1892<tr>
1893 <td>GpuAcc
1894 <td>
1895 <ul>
1896 <li>All
1897 </ul>
1898 <td>
1899 <table>
1900 <tr><th>
1901 <tr><td>QASYMMU8
1902 <tr><td>QASYMMS8
1903 <tr><td>FLOAT16
1904 <tr><td>FLOAT32
1905 </table>
1906<tr>
1907 <td rowspan="3">MemCopyLayer
1908 <td rowspan="3" style="width:200px;"> Layer to perform memory copy operation.
1909 <td rowspan="3">
1910 <ul>
1911 <li>N/A
1912 </ul>
1913 <td>CpuRef
1914 <td>
1915 <ul>
1916 <li>All
1917 </ul>
1918 <td>
1919 <table>
1920 <tr><th>
1921 <tr><td>BFLOAT16
1922 <tr><td>FLOAT16
1923 <tr><td>FLOAT32
1924 <tr><td>QASYMMS8
1925 <tr><td>QASYMMU8
1926 <tr><td>QSYMMS16
1927 <tr><td>BOOLEAN
1928 </table>
1929<tr>
1930 <td>CpuAcc
1931 <td>
1932 <ul>
1933 <li>All
1934 </ul>
1935 <td>
1936 <table>
1937 <tr><th>
1938 <tr><td>All
1939 </table>
1940<tr>
1941 <td>GpuAcc
1942 <td>
1943 <ul>
1944 <li>All
1945 </ul>
1946 <td>
1947 <table>
1948 <tr><th>
1949 <tr><td>All
1950 </table>
1951<tr>
1952 <td rowspan="3">MemImportLayer
1953 <td rowspan="3" style="width:200px;"> Layer to perform memory import operation.
1954 <td rowspan="3">
1955 <ul>
1956 <li>N/A
1957 </ul>
1958 <td>CpuRef
1959 <td>
1960 <ul>
1961 <li>All
1962 </ul>
1963 <td>
1964 <table>
1965 <tr><th>
1966 <tr><td>All
1967 </table>
1968<tr>
1969 <td>CpuAcc
1970 <td>
1971 <ul>
1972 <li>All
1973 </ul>
1974 <td>
1975 <table>
1976 <tr><th>
1977 <tr><td>All
1978 </table>
1979<tr>
1980 <td>GpuAcc
1981 <td>
1982 <ul>
1983 <li>All
1984 </ul>
1985 <td>
1986 <table>
1987 <tr><th>
1988 <tr><td>All
1989 </table>
1990<tr>
1991 <td rowspan="3">MergeLayer
1992 <td rowspan="3" style="width:200px;"> Layer to concatenate tensors along a given axis.
1993 <td rowspan="3">
1994 <ul>
1995 <li>ANEURALNETWORKS_CONCATENATION
1996 </ul>
1997 <td>CpuRef
1998 <td>
1999 <ul>
2000 <li>All
2001 </ul>
2002 <td>
2003 <table>
2004 <tr><th>
2005 <tr><td>BFLOAT16
2006 <tr><td>FLOAT16
2007 <tr><td>FLOAT32
2008 <tr><td>QASYMMS8
2009 <tr><td>QASYMMU8
2010 <tr><td>QSYMMS16
2011 </table>
2012<tr>
2013 <td>CpuAcc
2014 <td>
2015 <ul>
2016 <li>All
2017 </ul>
2018 <td>
2019 <table>
2020 <tr><th>
2021 <tr><td>QASYMMU8
2022 <tr><td>QASYMMS8
2023 <tr><td>FLOAT16
2024 <tr><td>FLOAT32
2025 </table>
2026<tr>
2027 <td>GpuAcc
2028 <td>
2029 <ul>
2030 <li>All
2031 </ul>
2032 <td>
2033 <table>
2034 <tr><th>
2035 <tr><td>QASYMMU8
2036 <tr><td>QASYMMS8
2037 <tr><td>FLOAT16
2038 <tr><td>FLOAT32
2039 </table>
2040<tr>
2041 <td rowspan="3">MinimumLayer
2042 <td rowspan="3" style="width:200px;"> Layer to perform an elementwise minimum of two tensors.
2043 <td rowspan="3">
2044 <ul>
2045 <li>ANEURALNETWORKS_MINIMUM
2046 </ul>
2047 <td>CpuRef
2048 <td>
2049 <ul>
2050 <li>All
2051 </ul>
2052 <td>
2053 <table>
2054 <tr><th>
2055 <tr><td>BFLOAT16
2056 <tr><td>FLOAT16
2057 <tr><td>FLOAT32
2058 <tr><td>QASYMMS8
2059 <tr><td>QASYMMU8
2060 <tr><td>QSYMMS16
2061 <tr><td>SIGNED32
2062 </table>
2063<tr>
2064 <td>CpuAcc
2065 <td>
2066 <ul>
2067 <li>All
2068 </ul>
2069 <td>
2070 <table>
2071 <tr><th>
2072 <tr><td>QASYMMU8
2073 <tr><td>QASYMMS8
2074 <tr><td>QSYMMS16
2075 <tr><td>FLOAT16
2076 <tr><td>FLOAT32
2077 </table>
2078<tr>
2079 <td>GpuAcc
2080 <td>
2081 <ul>
2082 <li>All
2083 </ul>
2084 <td>
2085 <table>
2086 <tr><th>
2087 <tr><td>QASYMMU8
2088 <tr><td>QASYMMS8
2089 <tr><td>QSYMMS16
2090 <tr><td>FLOAT16
2091 <tr><td>FLOAT32
2092 <tr><td>SIGNED32
2093 </table>
2094<tr>
2095 <td rowspan="3">MultiplicationLayer
2096 <td rowspan="3" style="width:200px;"> Layer to perform an elementwise multiplication of two tensors.
2097 <td rowspan="3">
2098 <ul>
2099 <li>ANEURALNETWORKS_MUL
2100 </ul>
2101 <td>CpuRef
2102 <td>
2103 <ul>
2104 <li>All
2105 </ul>
2106 <td>
2107 <table>
2108 <tr><th>
2109 <tr><td>BFLOAT16
2110 <tr><td>FLOAT16
2111 <tr><td>FLOAT32
2112 <tr><td>QASYMMS8
2113 <tr><td>QASYMMU8
2114 <tr><td>QSYMMS16
2115 <tr><td>SIGNED32
2116 </table>
2117<tr>
2118 <td>CpuAcc
2119 <td>
2120 <ul>
2121 <li>All
2122 </ul>
2123 <td>
2124 <table>
2125 <tr><th>
2126 <tr><td>QASYMMU8
2127 <tr><td>QASYMMS8
2128 <tr><td>QSYMMS16
2129 <tr><td>SIGNED32
2130 <tr><td>FLOAT16
2131 <tr><td>FLOAT32
2132 </table>
2133<tr>
2134 <td>GpuAcc
2135 <td>
2136 <ul>
2137 <li>All
2138 </ul>
2139 <td>
2140 <table>
2141 <tr><th>
2142 <tr><td>QASYMMU8
2143 <tr><td>QASYMMS8
2144 <tr><td>QSYMMS16
2145 <tr><td>SIGNED32
2146 <tr><td>FLOAT16
2147 <tr><td>FLOAT32
2149 </table>
2150<tr>
2151 <td rowspan="3">NormalizationLayer
2152 <td rowspan="3" style="width:200px;"> Layer to compute normalization operation.
2153 <td rowspan="3">
2154 <ul>
2155 <li>ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION
2156 </ul>
2157 <td>CpuRef
2158 <td>
2159 <ul>
2160 <li>All
2161 </ul>
2162 <td>
2163 <table>
2164 <tr><th>
2165 <tr><td>BFLOAT16
2166 <tr><td>FLOAT16
2167 <tr><td>FLOAT32
2168 <tr><td>QASYMMS8
2169 <tr><td>QASYMMU8
2170 <tr><td>QSYMMS16
2171 </table>
2172<tr>
2173 <td>CpuAcc
2174 <td>
2175 <ul>
2176 <li>NHWC
2177 <li>NCHW
2178 </ul>
2179 <td>
2180 <table>
2181 <tr><th>
2182 <tr><td>FLOAT32
2183 <tr><td>FLOAT16
2184 </table>
2185<tr>
2186 <td>GpuAcc
2187 <td>
2188 <ul>
2189 <li>NHWC
2190 <li>NCHW
2191 </ul>
2192 <td>
2193 <table>
2194 <tr><th>
2195 <tr><td>FLOAT32
2196 <tr><td>FLOAT16
2197 </table>
2198<tr>
2199 <td rowspan="1">OutputLayer
2200 <td rowspan="1" style="width:200px;"> A special layer providing access to a user supplied buffer into which the output of a network can be written.
2201 <td rowspan="1">
2202 <ul>
2203 <li>N/A
2204 </ul>
2205 <td>All
2206 <td>
2207 <ul>
2208 <li>All
2209 </ul>
2210 <td>
2211 <table>
2212 <tr><th>
2213 <tr><td>All
2214 </table>
2215<tr>
2216 <td rowspan="3">PadLayer
2217 <td rowspan="3" style="width:200px;"> Layer to pad a tensor.
2218 <td rowspan="3">
2219 <ul>
2220 <li>ANEURALNETWORKS_PAD
2221 <li>ANEURALNETWORKS_PAD_V2
2222 </ul>
2223 <td>CpuRef
2224 <td>
2225 <ul>
2226 <li>All
2227 </ul>
2228 <td>
2229 <table>
2230 <tr><th>
2231 <tr><td>BFLOAT16
2232 <tr><td>FLOAT16
2233 <tr><td>FLOAT32
2234 <tr><td>QASYMMS8
2235 <tr><td>QASYMMU8
2236 <tr><td>QSYMMS16
2237 </table>
2238<tr>
2239 <td>CpuAcc
2240 <td>
2241 <ul>
2242 <li>NHWC
2243 <li>NCHW
2244 </ul>
2245 <td>
2246 <table>
2247 <tr><th>
2248 <tr><td>All
2249 </table>
2250<tr>
2251 <td>GpuAcc
2252 <td>
2253 <ul>
2254 <li>NHWC
2255 <li>NCHW
2256 </ul>
2257 <td>
2258 <table>
2259 <tr><th>
2260 <tr><td>All
2261 </table>
2262<tr>
2263 <td rowspan="3">PermuteLayer
2264 <td rowspan="3" style="width:200px;"> Layer to transpose an ND tensor.
2265 <td rowspan="3">
2266 <ul>
2267 <li>ANEURALNETWORKS_TRANSPOSE
2268 </ul>
2269 <td>CpuRef
2270 <td>
2271 <ul>
2272 <li>All
2273 </ul>
2274 <td>
2275 <table>
2276 <tr><th>
2277 <tr><td>BFLOAT16
2278 <tr><td>FLOAT16
2279 <tr><td>FLOAT32
2280 <tr><td>QASYMMS8
2281 <tr><td>QASYMMU8
2282 <tr><td>QSYMMS16
2283 </table>
2284<tr>
2285 <td>CpuAcc
2286 <td>
2287 <ul>
2288 <li>NHWC
2289 <li>NCHW
2290 </ul>
2291 <td>
2292 <table>
2293 <tr><th>
2294 <tr><td>All
2295 </table>
2296<tr>
2297 <td>GpuAcc
2298 <td>
2299 <ul>
2300 <li>NHWC
2301 <li>NCHW
2302 </ul>
2303 <td>
2304 <table>
2305 <tr><th>
2306 <tr><td>All
2307 </table>
2308<tr>
2309 <td rowspan="3">Pooling2dLayer
Tamás Nyíri7b885b32021-10-26 14:47:57 +01002310 <td rowspan="3" style="width:200px;"> Layer to perform 2D pooling with the specified pooling operation.
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01002311 <td rowspan="3">
2312 <ul>
2313 <li>ANEURALNETWORKS_AVERAGE_POOL_2D
2314 <li>ANEURALNETWORKS_L2_POOL_2D
2315 <li>ANEURALNETWORKS_MAX_POOL_2D
2316 </ul>
2317 <td>CpuRef
2318 <td>
2319 <ul>
2320 <li>All
2321 </ul>
2322 <td>
2323 <table>
2324 <tr><th>
2325 <tr><td>BFLOAT16
2326 <tr><td>FLOAT16
2327 <tr><td>FLOAT32
2328 <tr><td>QASYMMS8
2329 <tr><td>QASYMMU8
2330 <tr><td>QSYMMS16
2331 </table>
2332<tr>
2333 <td>CpuAcc
2334 <td>
2335 <ul>
2336 <li>NHWC
2337 <li>NCHW
2338 </ul>
2339 <td>
2340 <table>
2341 <tr><th>
2342 <tr><td>QASYMMU8
2343 <tr><td>QASYMMS8
2344 <tr><td>FLOAT16
2345 <tr><td>FLOAT32
2346 </table>
2347<tr>
2348 <td>GpuAcc
2349 <td>
2350 <ul>
2351 <li>NHWC
2352 <li>NCHW
2353 </ul>
2354 <td>
2355 <table>
2356 <tr><th>
2357 <tr><td>QASYMMU8
2358 <tr><td>QASYMMS8
2359 <tr><td>FLOAT16
2360 <tr><td>FLOAT32
2361 </table>
2362<tr>
Tamás Nyíri7b885b32021-10-26 14:47:57 +01002363 <td rowspan="3">Pooling3dLayer
2364 <td rowspan="3" style="width:200px;"> Layer to perform 3D pooling with the specified pooling operation.
2365 <td rowspan="3">
2366 <ul>
2367 <li>ANEURALNETWORKS_AVERAGE_POOL_3D
2368 <li>ANEURALNETWORKS_L2_POOL_3D
2369 <li>ANEURALNETWORKS_MAX_POOL_3D
2370 </ul>
2371 <td>CpuRef
2372 <td>
2373 <ul>
2374 <li>NDHWC
2375 </ul>
2376 <td>
2377 <table>
2378 <tr><th>
2379 <tr><td>BFLOAT16
2380 <tr><td>FLOAT16
2381 <tr><td>FLOAT32
2382 <tr><td>QASYMMS8
2383 <tr><td>QASYMMU8
2384 <tr><td>QSYMMS16
2385 </table>
2386<tr>
2387 <td>CpuAcc
2388 <td>
2389 <ul>
2390 <li>N/A
2391 </ul>
2392 <td>
2393<tr>
2394 <td>GpuAcc
2395 <td>
2396 <ul>
2397 <li>NDHWC
2398 </ul>
2399<tr>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01002400 <td rowspan="1">PreCompiledLayer
2401 <td rowspan="1" style="width:200px;"> Opaque layer provided by a backend which provides an executable representation of a subgraph from the original network.
2402 <td rowspan="1">
2403 <ul>
2404 <li>N/A
2405 </ul>
2406 <td>N/A
2407 <td>N/A
2408 <td>N/A
2409<tr>
2410 <td rowspan="3">PreluLayer
2411 <td rowspan="3" style="width:200px;"> Layer to compute the activation layer with the PRELU activation function.
2412 <td rowspan="3">
2413 <ul>
2414 <li>ANEURALNETWORKS_PRELU
2415 </ul>
2416 <td>CpuRef
2417 <td>
2418 <ul>
2419 <li>All
2420 </ul>
2421 <td>
2422 <table>
2423 <tr><th>
2424 <tr><td>BFLOAT16
2425 <tr><td>FLOAT16
2426 <tr><td>FLOAT32
2427 <tr><td>QASYMMS8
2428 <tr><td>QASYMMU8
2429 <tr><td>QSYMMS16
2430 </table>
2431<tr>
2432 <td>CpuAcc
2433 <td>
2434 <ul>
2435 <li>All
2436 </ul>
2437 <td>
2438 <table>
2439 <tr><th>
2440 <tr><td>QASYMMU8
2441 <tr><td>QASYMMS8
2442 <tr><td>FLOAT16
2443 <tr><td>FLOAT32
2444 </table>
2445<tr>
2446 <td>GpuAcc
2447 <td>
2448 <ul>
2449 <li>All
2450 </ul>
2451 <td>
2452 <table>
2453 <tr><th>
2454 <tr><td>QASYMMU8
2455 <tr><td>QASYMMS8
2456 <tr><td>FLOAT16
2457 <tr><td>FLOAT32
2458 </table>
2459<tr>
2460 <td rowspan="3">QLstmLayer
2461 <td rowspan="3" style="width:200px;"> Layer to perform quantized LSTM (Long Short-Term Memory) operation.
2462 <td rowspan="3">
2463 <ul>
2464 <li>ANEURALNETWORKS_QUANTIZED_LSTM
2465 <li>ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
2466 </ul>
2467 <td>CpuRef
2468 <td>
2469 <ul>
2470 <li>All
2471 </ul>
2472 <td>
2473 <table>
2474 <tr><th>
2475 <tr><td>All
2476 </table>
2477<tr>
2478 <td>CpuAcc
2479 <td>
2480 <ul>
2481 <li>All
2482 </ul>
2483 <td>
2484 <table>
2485 <tr><th>
2486 <tr><td>QASYMMS8
2487 <tr><td>QASYMMU8
2488 <tr><td>SIGNED32
2489 <tr><td>QSYMMS16
2490 </table>
2491<tr>
2492 <td>GpuAcc
2493 <td>
2494 <ul>
2495 <li>All
2496 </ul>
2497 <td>
2498 <table>
2499 <tr><th>
2500 <tr><td>QASYMMS8
2501 <tr><td>QASYMMU8
2502 <tr><td>SIGNED32
2503 <tr><td>QSYMMS16
2504 </table>
2505<tr>
2506 <td rowspan="3">QuantizeLayer
2507 <td rowspan="3" style="width:200px;"> Layer to perform quantization operation.
2508 <td rowspan="3">
2509 <ul>
2510 <li>ANEURALNETWORKS_QUANTIZE
2511 </ul>
2512 <td>CpuRef
2513 <td>
2514 <ul>
2515 <li>All
2516 </ul>
2517 <td>
2518 <table>
2519 <tr><th>
2520 <tr><td>BFLOAT16
2521 <tr><td>FLOAT16
2522 <tr><td>FLOAT32
2523 <tr><td>QASYMMS8
2524 <tr><td>QASYMMU8
2525 <tr><td>QSYMMS8
2526 <tr><td>QSYMMS16
2527 </table>
2528<tr>
2529 <td>CpuAcc
2530 <td>
2531 <ul>
2532 <li>All
2533 </ul>
2534 <td>
2535 <table>
2536 <tr><th>
2537 <tr><td>QASYMMU8
2538 <tr><td>QASYMMS8
2539 <tr><td>QSYMMS16
2540 <tr><td>FLOAT16
2541 <tr><td>FLOAT32
2542 </table>
2543<tr>
2544 <td>GpuAcc
2545 <td>
2546 <ul>
2547 <li>All
2548 </ul>
2549 <td>
2550 <table>
2551 <tr><th>
2552 <tr><td>QASYMMU8
2553 <tr><td>QASYMMS8
2554 <tr><td>QSYMMS16
2555 <tr><td>FLOAT16
2556 <tr><td>FLOAT32
2557 </table>
2558<tr>
2559 <td rowspan="3">QuantizedLstmLayer
2560 <td rowspan="3" style="width:200px;"> Layer to perform quantized LSTM (Long Short-Term Memory) operation.
2561 <td rowspan="3">
2562 <ul>
2563 <li>ANEURALNETWORKS_QUANTIZED_LSTM
2564 <li>ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
2565 </ul>
2566 <td>CpuRef
2567 <td>
2568 <ul>
2569 <li>All
2570 </ul>
2571 <td>
2572 <table>
2573 <tr><th>
2574 <tr><td>All
2575 </table>
2576<tr>
2577 <td>CpuAcc
2578 <td>
2579 <ul>
2580 <li>All
2581 </ul>
2582 <td>
2583 <table>
2584 <tr><th>
2585 <tr><td>SIGNED32
2586 <tr><td>QASYMMU8
2587 <tr><td>QSYMMS16
2588 </table>
2589<tr>
2590 <td>GpuAcc
2591 <td>
2592 <ul>
2593 <li>All
2594 </ul>
2595 <td>
2596 <table>
2597 <tr><th>
2598 <tr><td>SIGNED32
2599 <tr><td>QASYMMU8
2600 <tr><td>QSYMMS16
2601 </table>
2602<tr>
2603 <td rowspan="3">RankLayer
2604 <td rowspan="3" style="width:200px;"> Layer to perform a rank operation.
2605 <td rowspan="3">
2606 <ul>
2607 <li>ANEURALNETWORKS_RANK
2608 </ul>
2609 <td>CpuRef
2610 <td>
2611 <ul>
2612 <li>All
2613 </ul>
2614 <td>
2615 <table>
2616 <tr><th>
2617 <tr><td>All
2618 </table>
2619<tr>
2620 <td>CpuAcc
2621 <td>
2622 <ul>
2623 <li>All
2624 </ul>
2625 <td>
2626 <table>
2627 <tr><th>
2628 <tr><td>All
2629 </table>
2630<tr>
2631 <td>GpuAcc
2632 <td>
2633 <ul>
2634 <li>All
2635 </ul>
2636 <td>
2637 <table>
2638 <tr><th>
2639 <tr><td>All
2640 </table>
2641<tr>
2642 <td rowspan="3">ReduceLayer
2643 <td rowspan="3" style="width:200px;"> Layer to perform reduce with the following operations - ARG_IDX_MAX: Index of the max value - ARG_IDX_MIN: Index of the min value - MEAN_SUM: Mean of sum - PROD: Product - SUM_SQUARE: Sum of squares - SUM: Sum - MIN: Min - MAX: Max
2644 <td rowspan="3">
2645 <ul>
2646 <li>ANEURALNETWORKS_REDUCE_MAX
2647 <li>ANEURALNETWORKS_REDUCE_MIN
2648 <li>ANEURALNETWORKS_REDUCE_SUM
Teresa Charlin32b78702021-09-03 11:25:54 +01002649 <li>ANEURALNETWORKS_REDUCE_PROD
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01002650 </ul>
2651 <td>CpuRef
2652 <td>
2653 <ul>
2654 <li>All
2655 </ul>
2656 <td>
2657 <table>
2658 <tr><th>
2659 <tr><td>BFLOAT16
2660 <tr><td>FLOAT16
2661 <tr><td>FLOAT32
2662 <tr><td>QASYMMS8
2663 <tr><td>QASYMMU8
2664 <tr><td>QSYMMS16
2665 <tr><td>SIGNED32
2666 </table>
2667<tr>
2668 <td>CpuAcc
2669 <td>
2670 <ul>
2671 <li>All
2672 </ul>
2673 <td>
2674 <table>
2675 <tr><th>
2676 <tr><td>QASYMMU8
2677 <tr><td>QASYMMS8
2678 <tr><td>FLOAT16
2679 <tr><td>FLOAT32
2680 <tr><td>SIGNED32
2681 </table>
2682<tr>
2683 <td>GpuAcc
2684 <td>
2685 <ul>
2686 <li>All
2687 </ul>
2688 <td>
2689 <table>
2690 <tr><th>
2691 <tr><td>QASYMMU8
2692 <tr><td>QASYMMS8
2693 <tr><td>FLOAT16
2694 <tr><td>FLOAT32
2695 <tr><td>SIGNED32
2696 </table>
2697<tr>
2698 <td rowspan="3">ReshapeLayer
2699 <td rowspan="3" style="width:200px;"> Layer to reshape a tensor.
2700 <td rowspan="3">
2701 <ul>
2702 <li>ANEURALNETWORKS_RESHAPE
2703 <li>ANEURALNETWORKS_SQUEEZE
2704 <li>ANEURALNETWORKS_EXPAND_DIMS
2705 </ul>
2706 <td>CpuRef
2707 <td>
2708 <ul>
2709 <li>All
2710 </ul>
2711 <td>
2712 <table>
2713 <tr><th>
2714 <tr><td>BFLOAT16
2715 <tr><td>FLOAT16
2716 <tr><td>FLOAT32
2717 <tr><td>QASYMMS8
2718 <tr><td>QASYMMU8
2719 <tr><td>QSYMMS16
2720 <tr><td>SIGNED32
2721 <tr><td>BOOLEAN
2722 </table>
2723<tr>
2724 <td>CpuAcc
2725 <td>
2726 <ul>
2727 <li>All
2728 </ul>
2729 <td>
2730 <table>
2731 <tr><th>
2732 <tr><td>All
2733 </table>
2734<tr>
2735 <td>GpuAcc
2736 <td>
2737 <ul>
2738 <li>All
2739 </ul>
2740 <td>
2741 <table>
2742 <tr><th>
2743 <tr><td>All
2744 </table>
2745<tr>
2746 <td rowspan="3">ResizeLayer
2747 <td rowspan="3" style="width:200px;"> Layer to perform resize of a tensor using one of the interpolation methods: - Bilinear - Nearest Neighbor.
2748 <td rowspan="3">
2749 <ul>
2750 <li>ANEURALNETWORKS_RESIZE_BILINEAR
2751 <li>ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR
2752 </ul>
2753 <td>CpuRef
2754 <td>
2755 <ul>
2756 <li>All
2757 </ul>
2758 <td>
2759 <table>
2760 <tr><th>
2761 <tr><td>BFLOAT16
2762 <tr><td>FLOAT16
2763 <tr><td>FLOAT32
2764 <tr><td>QASYMMS8
2765 <tr><td>QASYMMU8
2766 <tr><td>QSYMMS16
2767 </table>
2768<tr>
2769 <td>CpuAcc
2770 <td>
2771 <ul>
2772 <li>NHWC
2773 <li>NCHW
2774 </ul>
2775 <td>
2776 <table>
2777 <tr><th>
2778 <tr><td>QASYMMU8
2779 <tr><td>QASYMMS8
2780 <tr><td>FLOAT16
2781 <tr><td>FLOAT32
2782 </table>
2783<tr>
2784 <td>GpuAcc
2785 <td>
2786 <ul>
2787 <li>NHWC
2788 <li>NCHW
2789 </ul>
2790 <td>
2791 <table>
2792 <tr><th>
2793 <tr><td>QASYMMU8
2794 <tr><td>QASYMMS8
2795 <tr><td>FLOAT16
2796 <tr><td>FLOAT32
2797 </table>
2798<tr>
2799 <td rowspan="3">RsqrtLayer
2800 <td rowspan="3" style="width:200px;"> Layer to perform Rsqrt operation.
2801 <td rowspan="3">
2802 <ul>
2803 <li>ANEURALNETWORKS_RSQRT
2804 </ul>
2805 <td>CpuRef
2806 <td>
2807 <ul>
2808 <li>All
2809 </ul>
2810 <td>
2811 <table>
2812 <tr><th>
2813 <tr><td>BFLOAT16
2814 <tr><td>FLOAT16
2815 <tr><td>FLOAT32
2816 <tr><td>QASYMMS8
2817 <tr><td>QASYMMU8
2818 <tr><td>QSYMMS16
2819 <tr><td>SIGNED32
2820 </table>
2821<tr>
2822 <td>CpuAcc
2823 <td>
2824 <ul>
2825 <li>All
2826 </ul>
2827 <td>
2828 <table>
2829 <tr><th>
2830 <tr><td>FLOAT16
2831 <tr><td>FLOAT32
2832 <tr><td>SIGNED32
2833 </table>
2834<tr>
2835 <td>GpuAcc
2836 <td>
2837 <ul>
2838 <li>All
2839 </ul>
2840 <td>
2841 <table>
2842 <tr><th>
2843 <tr><td>FLOAT16
2844 <tr><td>FLOAT32
2845 </table>
2846<tr>
2847 <td rowspan="3">ShapeLayer
2848 <td rowspan="3" style="width:200px;"> Layer to return the shape of the input tensor.
2849 <td rowspan="3">
2850 <ul>
2851 <li>N/A
2852 </ul>
2853 <td>CpuRef
2854 <td>
2855 <ul>
2856 <li>All
2857 </ul>
2858 <td>
2859 <table>
2860 <tr><th>
2861 <tr><td>All
2862 </table>
2863<tr>
2864 <td>CpuAcc
2865 <td>
2866 <ul>
2867 <li>All
2868 </ul>
2869 <td>
2870 <table>
2871 <tr><th>
2872 <tr><td>All
2873 </table>
2874<tr>
2875 <td>GpuAcc
2876 <td>
2877 <ul>
2878 <li>All
2879 </ul>
2880 <td>
2881 <table>
2882 <tr><th>
2883 <tr><td>All
2884 </table>
2885<tr>
2886 <td rowspan="3">SliceLayer
2887 <td rowspan="3" style="width:200px;"> Layer to perform tensor slicing.
2888 <td rowspan="3">
2889 <ul>
2890 <li>ANEURALNETWORKS_SLICE
2891 </ul>
2892 <td>CpuRef
2893 <td>
2894 <ul>
2895 <li>All
2896 </ul>
2897 <td>
2898 <table>
2899 <tr><th>
2900 <tr><td>BFLOAT16
2901 <tr><td>FLOAT32
2902 <tr><td>QASYMMS8
2903 <tr><td>QASYMMU8
2904 <tr><td>QSYMMS16
2905 </table>
2906<tr>
2907 <td>CpuAcc
2908 <td>
2909 <ul>
2910 <li>All
2911 </ul>
2912 <td>
2913 <table>
2914 <tr><th>
2915 <tr><td>All
2916 </table>
2917<tr>
2918 <td>GpuAcc
2919 <td>
2920 <ul>
2921 <li>All
2922 </ul>
2923 <td>
2924 <table>
2925 <tr><th>
2926 <tr><td>All
2927 </table>
2928<tr>
2929 <td rowspan="3">SoftmaxLayer
2930 <td rowspan="3" style="width:200px;"> Layer to perform softmax, log-softmax operation over the specified axis.
2931 <td rowspan="3">
2932 <ul>
2933 <li>ANEURALNETWORKS_LOG_SOFTMAX
2934 <li>ANEURALNETWORKS_SOFTMAX
2935 </ul>
2936 <td>CpuRef
2937 <td>
2938 <ul>
2939 <li>All
2940 </ul>
2941 <td>
2942 <table>
2943 <tr><th>
2944 <tr><td>BFLOAT16
2945 <tr><td>FLOAT16
2946 <tr><td>FLOAT32
2947 <tr><td>QASYMMS8
2948 <tr><td>QASYMMU8
2949 <tr><td>QSYMMS8
2950 <tr><td>QSYMMS16
2951 </table>
2952<tr>
2953 <td>CpuAcc
2954 <td>
2955 <ul>
2956 <li>All
2957 </ul>
2958 <td>
2959 <table>
2960 <tr><th>
2961 <tr><td>QASYMMU8
2962 <tr><td>QASYMMS8
2963 <tr><td>FLOAT16
2964 <tr><td>FLOAT32
2965 </table>
2966<tr>
2967 <td>GpuAcc
2968 <td>
2969 <ul>
2970 <li>All
2971 </ul>
2972 <td>
2973 <table>
2974 <tr><th>
2975 <tr><td>QASYMMU8
2976 <tr><td>QASYMMS8
2977 <tr><td>FLOAT16
2978 <tr><td>FLOAT32
2979 </table>
2980<tr>
2981 <td rowspan="3">SpaceToBatchNdLayer
2982 <td rowspan="3" style="width:200px;"> Layer to divide spatial dimensions of the tensor into a grid of blocks and interleaves these blocks with the batch dimension.
2983 <td rowspan="3">
2984 <ul>
2985 <li>ANEURALNETWORKS_SPACE_TO_BATCH_ND
2986 </ul>
2987 <td>CpuRef
2988 <td>
2989 <ul>
2990 <li>All
2991 </ul>
2992 <td>
2993 <table>
2994 <tr><th>
2995 <tr><td>BFLOAT16
2996 <tr><td>FLOAT16
2997 <tr><td>FLOAT32
2998 <tr><td>QASYMMS8
2999 <tr><td>QASYMMU8
3000 <tr><td>QSYMMS16
3001 </table>
3002<tr>
3003 <td>CpuAcc
3004 <td>
3005 <ul>
3006 <li>NHWC
3007 <li>NCHW
3008 </ul>
3009 <td>
3010 <table>
3011 <tr><th>
3012 <tr><td>All
3013 </table>
3014<tr>
3015 <td>GpuAcc
3016 <td>
3017 <ul>
3018 <li>NHWC
3019 <li>NCHW
3020 </ul>
3021 <td>
3022 <table>
3023 <tr><th>
3024 <tr><td>All
3025 </table>
3026<tr>
3027 <td rowspan="3">SpaceToDepthLayer
3028 <td rowspan="3" style="width:200px;"> Layer to rearrange blocks of spatial data into depth.
3029 <td rowspan="3">
3030 <ul>
3031 <li>ANEURALNETWORKS_SPACE_TO_DEPTH
3032 </ul>
3033 <td>CpuRef
3034 <td>
3035 <ul>
3036 <li>All
3037 </ul>
3038 <td>
3039 <table>
3040 <tr><th>
3041 <tr><td>BFLOAT16
3042 <tr><td>FLOAT16
3043 <tr><td>FLOAT32
3044 <tr><td>QASYMMS8
3045 <tr><td>QASYMMU8
3046 <tr><td>QSYMMS16
3047 </table>
3048<tr>
3049 <td>CpuAcc
3050 <td>
3051 <ul>
3052 <li>NHWC
3053 <li>NCHW
3054 </ul>
3055 <td>
3056 <table>
3057 <tr><th>
3058 <tr><td>All
3059 </table>
3060<tr>
3061 <td>GpuAcc
3062 <td>
3063 <ul>
3064 <li>NHWC
3065 <li>NCHW
3066 </ul>
3067 <td>
3068 <table>
3069 <tr><th>
3070 <tr><td>All
3071 </table>
3072<tr>
3073 <td rowspan="3">SplitterLayer
3074 <td rowspan="3" style="width:200px;"> Layer to split a tensor along a given axis.
3075 <td rowspan="3">
3076 <ul>
3077 <li>ANEURALNETWORKS_SPLIT
3078 </ul>
3079 <td>CpuRef
3080 <td>
3081 <ul>
3082 <li>All
3083 </ul>
3084 <td>
3085 <table>
3086 <tr><th>
3087 <tr><td>BFLOAT16
3088 <tr><td>FLOAT16
3089 <tr><td>FLOAT32
3090 <tr><td>QASYMMS8
3091 <tr><td>QASYMMU8
3092 <tr><td>QSYMMS16
3093 </table>
3094<tr>
3095 <td>CpuAcc
3096 <td>
3097 <ul>
3098 <li>All
3099 </ul>
3100 <td>
3101 <table>
3102 <tr><th>
3103 <tr><td>All
3104 </table>
3105<tr>
3106 <td>GpuAcc
3107 <td>
3108 <ul>
3109 <li>All
3110 </ul>
3111 <td>
3112 <table>
3113 <tr><th>
3114 <tr><td>All
3115 </table>
3116<tr>
3117 <td rowspan="3">StackLayer
3118 <td rowspan="3" style="width:200px;"> Layer to stack tensors along an axis.
3119 <td rowspan="3">
3120 <ul>
3121 <li>N/A
3122 </ul>
3123 <td>CpuRef
3124 <td>
3125 <ul>
3126 <li>All
3127 </ul>
3128 <td>
3129 <table>
3130 <tr><th>
3131 <tr><td>BFLOAT16
3132 <tr><td>FLOAT16
3133 <tr><td>FLOAT32
3134 <tr><td>QASYMMS8
3135 <tr><td>QASYMMU8
3136 <tr><td>QSYMMS16
3137 </table>
3138<tr>
3139 <td>CpuAcc
3140 <td>
3141 <ul>
3142 <li>All
3143 </ul>
3144 <td>
3145 <table>
3146 <tr><th>
3147 <tr><td>All
3148 </table>
3149<tr>
3150 <td>GpuAcc
3151 <td>
3152 <ul>
3153 <li>All
3154 </ul>
3155 <td>
3156 <table>
3157 <tr><th>
3158 <tr><td>All
3159 </table>
3160<tr>
3161 <td rowspan="1">StandInLayer
3162 <td rowspan="1" style="width:200px;"> A layer to represent "unknown" or "unsupported" operations in the input graph. It has a configurable number of input and output slots and an optional name.
3163 <td rowspan="1">
3164 <ul>
3165 <li>N/A
3166 </ul>
3167 <td>N/A
3168 <td>N/A
3169 <td>N/A
3170<tr>
3171 <td rowspan="3">StridedSliceLayer
3172 <td rowspan="3" style="width:200px;"> Layer to extract a strided slice of a tensor.
3173 <td rowspan="3">
3174 <ul>
3175 <li>ANEURALNETWORKS_STRIDED_SLICE
3176 </ul>
3177 <td>CpuRef
3178 <td>
3179 <ul>
3180 <li>All
3181 </ul>
3182 <td>
3183 <table>
3184 <tr><th>
3185 <tr><td>BFLOAT16
3186 <tr><td>FLOAT32
3187 <tr><td>QASYMMS8
3188 <tr><td>QASYMMU8
3189 <tr><td>QSYMMS16
3190 </table>
3191<tr>
3192 <td>CpuAcc
3193 <td>
3194 <ul>
3195 <li>All
3196 </ul>
3197 <td>
3198 <table>
3199 <tr><th>
3200 <tr><td>All
3201 </table>
3202<tr>
3203 <td>GpuAcc
3204 <td>
3205 <ul>
3206 <li>All
3207 </ul>
3208 <td>
3209 <table>
3210 <tr><th>
3211 <tr><td>All
3212 </table>
3213<tr>
3214 <td rowspan="3">SubtractionLayer
3215 <td rowspan="3" style="width:200px;"> Layer to perform an elementwise subtract of 2 tensors.
3216 <td rowspan="3">
3217 <ul>
3218 <li>ANEURALNETWORKS_SUB
3219 </ul>
3220 <td>CpuRef
3221 <td>
3222 <ul>
3223 <li>All
3224 </ul>
3225 <td>
3226 <table>
3227 <tr><th>
3228 <tr><td>BFLOAT16
3229 <tr><td>FLOAT16
3230 <tr><td>FLOAT32
3231 <tr><td>QASYMMS8
3232 <tr><td>QASYMMU8
3233 <tr><td>QSYMMS16
3234 <tr><td>SIGNED32
3235 </table>
3236<tr>
3237 <td>CpuAcc
3238 <td>
3239 <ul>
3240 <li>All
3241 </ul>
3242 <td>
3243 <table>
3244 <tr><th>
3245 <tr><td>QASYMMU8
3246 <tr><td>QASYMMS8
3247 <tr><td>QSYMMS16
3248 <tr><td>SIGNED32
3249 <tr><td>FLOAT16
3250 <tr><td>FLOAT32
3251 </table>
3252<tr>
3253 <td>GpuAcc
3254 <td>
3255 <ul>
3256 <li>All
3257 </ul>
3258 <td>
3259 <table>
3260 <tr><th>
3261 <tr><td>QASYMMU8
3262 <tr><td>QASYMMS8
3263 <tr><td>QSYMMS16
3264 <tr><td>SIGNED32
3265 <tr><td>FLOAT16
3266 <tr><td>FLOAT32
3267 </table>
3268<tr>
3269 <td rowspan="3">TransposeConvolution2dLayer
3270 <td rowspan="3" style="width:200px;"> Layer to perform 2D transpose convolution (deconvolution) operation.
3271 <td rowspan="3">
3272 <ul>
3273 <li>ANEURALNETWORKS_TRANSPOSE_CONV_2D
3274 </ul>
3275 <td>CpuRef
3276 <td>
3277 <ul>
3278 <li>All
3279 </ul>
3280 <td>
3281 <table>
3282 <tr><th>
3283 <tr><td>BFLOAT16
3284 <tr><td>FLOAT16
3285 <tr><td>FLOAT32
3286 <tr><td>QASYMMS8
3287 <tr><td>QASYMMU8
3288 <tr><td>QSYMMS8
3289 <tr><td>QSYMMS16
3290 </table>
3291<tr>
3292 <td>CpuAcc
3293 <td>
3294 <ul>
3295 <li>NHWC
3296 <li>NCHW
3297 </ul>
3298 <td>
3299 <table>
3300 <tr><th>
3301 <tr><td>SIGNED32
3302 <tr><td>FLOAT16
3303 <tr><td>FLOAT32
3304 <tr><td>QASYMMU8
3305 <tr><td>QASYMMS8
3306 <tr><td>QUANTIZEDSYMM8PERAXIS
3307 </table>
3308<tr>
3309 <td>GpuAcc
3310 <td>
3311 <ul>
3312 <li>NHWC
3313 <li>NCHW
3314 </ul>
3315 <td>
3316 <table>
3317 <tr><th>
3318 <tr><td>SIGNED32
3319 <tr><td>FLOAT16
3320 <tr><td>FLOAT32
3321 <tr><td>QASYMMU8
3322 <tr><td>QASYMMS8
3323 <tr><td>QUANTIZEDSYMM8PERAXIS
3324 </table>
3325<tr>
3326 <td rowspan="3">TransposeLayer
3327 <td rowspan="3" style="width:200px;"> Layer to transpose a tensor.
3328 <td rowspan="3">
3329 <ul>
3330 <li>ANEURALNETWORKS_TRANSPOSE
3331 </ul>
3332 <td>CpuRef
3333 <td>
3334 <ul>
3335 <li>All
3336 </ul>
3337 <td>
3338 <table>
3339 <tr><th>
3340 <tr><td>BFLOAT16
3341 <tr><td>FLOAT16
3342 <tr><td>FLOAT32
3343 <tr><td>QASYMMS8
3344 <tr><td>QASYMMU8
3345 <tr><td>QSYMMS16
3346 </table>
3347<tr>
3348 <td>CpuAcc
3349 <td>
3350 <ul>
3351 <li>All
3352 </ul>
3353 <td>
3354 <table>
3355 <tr><th>
3356 <tr><td>All
3357 </table>
3358<tr>
3359 <td>GpuAcc
3360 <td>
3361 <ul>
3362 <li>All
3363 </ul>
3364 <td>
3365 <table>
3366 <tr><th>
3367 <tr><td>All
3368 </table>
3369<tr>
3370 <td rowspan="3">UnidirectionalSequenceLstmLayer
Narumol Prangnawaratbd575b22021-08-31 16:53:54 +01003371 <td rowspan="3" style="width:200px;"> Layer to perform unidirectional sequence LSTM operation.
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01003372 <td rowspan="3">
3373 <ul>
3374 <li>ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM
3375 </ul>
3376 <td>CpuRef
3377 <td>
3378 <ul>
3379 <li>All
3380 </ul>
3381 <td>
3382 <table>
Narumol Prangnawaratbd575b22021-08-31 16:53:54 +01003383 <tr><th>Input Types
3384 <tr><td>FLOAT32
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01003385 </table>
Narumol Prangnawaratbd575b22021-08-31 16:53:54 +01003386 <table>
3387 <tr><th>Weight Types
3388 <tr><td>FLOAT32
3389 <tr><td>QASYMMS8
3390 </table>
Cathal Corbettfd5bec42022-03-03 15:13:23 +00003391 <td>CpuAcc
3392 <td>
3393 <ul>
3394 <li>All
3395 </ul>
3396 <td>
3397 <table>
3398 <tr><th>Input Types
3399 <tr><td>FLOAT32
3400 </table>
3401 <table>
3402 <tr><th>Weight Types
3403 <tr><td>FLOAT32
3404 </table>
Cathal Corbett4952a3e2022-03-03 15:14:18 +00003405 <td>GpuAcc
3406 <td>
3407 <ul>
3408 <li>All
3409 </ul>
3410 <td>
3411 <table>
3412 <tr><th>Input Types
3413 <tr><td>FLOAT32
3414 </table>
3415 <table>
3416 <tr><th>Weight Types
3417 <tr><td>FLOAT32
3418 </table>
Sadik Armagan1a9c9f62021-08-05 09:25:15 +01003419<tr>
3420 <td rowspan="3">UnmapLayer
3421 <td rowspan="3" style="width:200px;"> Layer to perform unmap operation on tensor.
3422 <td rowspan="3">
3423 <ul>
3424 <li>N/A
3425 </ul>
3426 <td>CpuRef
3427 <td>
3428 <ul>
3429 <li>All
3430 </ul>
3431 <td>
3432 <table>
3433 <tr><th>
3434 <tr><td>All
3435 </table>
3436<tr>
3437 <td>CpuAcc
3438 <td>
3439 <ul>
3440 <li>NHWC
3441 <li>NCHW
3442 </ul>
3443 <td>
3444 <table>
3445 <tr><th>
3446 <tr><td>All
3447 </table>
3448<tr>
3449 <td>GpuAcc
3450 <td>
3451 <ul>
3452 <li>NHWC
3453 <li>NCHW
3454 </ul>
3455 <td>
3456 <table>
3457 <tr><th>
3458 <tr><td>All
3459 </table>
3460</table>
3461
3462*/
3463} // namespace