blob: fc412657383aeeecd1f388b987a9b7d0a02b2e8e [file] [log] [blame]
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001///
2/// Copyright (c) 2021 Arm Limited.
3///
4/// SPDX-License-Identifier: MIT
5///
6/// Permission is hereby granted, free of charge, to any person obtaining a copy
7/// of this software and associated documentation files (the "Software"), to
8/// deal in the Software without restriction, including without limitation the
9/// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10/// sell copies of the Software, and to permit persons to whom the Software is
11/// furnished to do so, subject to the following conditions:
12///
13/// The above copyright notice and this permission notice shall be included in all
14/// copies or substantial portions of the Software.
15///
16/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22/// SOFTWARE.
23///
24namespace arm_compute
25{
26/**
27@page operators_list Supported Operators
28
29@tableofcontents
30
31@section S9_1_operators_list Supported Operators
32
33Compute Library supports operators that are listed in below table.
34
35Compute Library supports a wide list of data-types, information can be found directly in the documentation of each kernel/function.
36The main data-types that the Machine Learning functions support are the following:
37 <ul>
38 <li>BFLOAT16: 16-bit non-standard brain floating point
39 <li>QASYMM8: 8-bit unsigned asymmetric quantized
40 <li>QASYMM8_SIGNED: 8-bit signed asymmetric quantized
41 <li>QSYMM8_PER_CHANNEL: 8-bit signed symmetric quantized (Used for the weights)
 42 <li>QSYMM8: 8-bit signed symmetric quantized
 43 <li>QSYMM16: 16-bit signed symmetric quantized
44 <li>F32: 32-bit single precision floating point
45 <li>F16: 16-bit half precision floating point
46 <li>S32: 32-bit signed integer
47 <li>U8: 8-bit unsigned char
48 <li>All: include all above data types
49 </ul>
50
51Compute Library supports the following data layouts (fast changing dimension from right to left):
52 <ul>
53 <li>NHWC: The native layout of Compute Library that delivers the best performance where channels are in the fastest changing dimension
54 <li>NCHW: Legacy layout where width is in the fastest changing dimension
55 <li>All: include all above data layouts
56 </ul>
57where N = batches, C = channels, H = height, W = width
58
59<table>
60<caption id="multi_row"></caption>
61<tr>
62 <th>Function
63 <th>Description
64 <th>Equivalent Android NNAPI Op
65 <th>Backends
66 <th>Data Layouts
67 <th>Data Types
68<tr>
69 <td rowspan="2">ActivationLayer
70 <td rowspan="2" style="width:200px;"> Function to simulate an activation layer with the specified activation function.
71 <td rowspan="2">
72 <ul>
73 <li>ANEURALNETWORKS_ELU
74 <li>ANEURALNETWORKS_HARD_SWISH
75 <li>ANEURALNETWORKS_LOGISTIC
76 <li>ANEURALNETWORKS_RELU
77 <li>ANEURALNETWORKS_RELU1
78 <li>ANEURALNETWORKS_RELU6
79 <li>ANEURALNETWORKS_TANH
80 </ul>
81 <td>NEActivationLayer
82 <td>
83 <ul>
84 <li>All
85 </ul>
86 <td>
87 <table>
88 <tr><th>src<th>dst
89 <tr><td>QASYMM8<td>QASYMM8
90 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
91 <tr><td>QSYMM16<td>QSYMM16
92 <tr><td>F16<td>F16
93 <tr><td>F32<td>F32
94 </table>
95<tr>
96 <td>CLActivationLayer
97 <td>
98 <ul>
99 <li>All
100 </ul>
101 <td>
102 <table>
103 <tr><th>src<th>dst
104 <tr><td>QASYMM8<td>QASYMM8
105 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
106 <tr><td>QSYMM16<td>QSYMM16
107 <tr><td>F16<td>F16
108 <tr><td>F32<td>F32
109 </table>
110<tr>
Teresa Charlin62687422021-04-28 10:58:49 +0100111 <td rowspan="2">ArgMinMaxLayer
112 <td rowspan="2" style="width:200px;"> Function to calculate the index of the minimum or maximum values in a tensor based on an axis.
113 <td rowspan="2">
114 <ul>
115 <li>ANEURALNETWORKS_ARGMAX
116 <li>ANEURALNETWORKS_ARGMIN
117 </ul>
118 <td>NEArgMinMaxLayer
119 <td>
120 <ul>
121 <li>All
122 </ul>
123 <td>
124 <table>
125 <tr><th>src<th>dst
126 <tr><td>QASYMM8<td>U32, S32
127 <tr><td>QASYMM8_SIGNED<td>U32, S32
128 <tr><td>S32<td>U32, S32
129 <tr><td>F16<td>U32, S32
130 <tr><td>F32<td>U32, S32
131 </table>
132<tr>
133 <td>CLArgMinMaxLayer
134 <td>
135 <ul>
136 <li>All
137 </ul>
138 <td>
139 <table>
140 <tr><th>src<th>dst
141 <tr><td>QASYMM8<td>U32, S32
142 <tr><td>QASYMM8_SIGNED<td>U32, S32
143 <tr><td>S32<td>U32, S32
144 <tr><td>F16<td>U32, S32
145 <tr><td>F32<td>U32, S32
146 </table>
147<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +0100148 <td rowspan="1">ArithmeticAddition
149 <td rowspan="1" style="width:200px;"> Function to add 2 tensors.
150 <td rowspan="1">
151 <ul>
152 <li>ANEURALNETWORKS_ADD
153 </ul>
154 <td>NEArithmeticAddition
155 <td>
156 <ul>
157 <li>All
158 </ul>
159 <td>
160 <table>
161 <tr><th>src0<th>src1<th>dst
162 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
163 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
164 <tr><td>QSYMM16<td>QSYMM16<td>QASYMM16
165 <tr><td>QSYMM16<td>QSYMM16<td>S32
166 <tr><td>U8<td>U8<td>U8
167 <tr><td>U8<td>U8<td>S16
168 <tr><td>U8<td>S16<td>S16
169 <tr><td>S16<td>U8<td>S16
170 <tr><td>S16<td>S16<td>S16
171 <tr><td>S32<td>S32<td>S32
172 <tr><td>F16<td>F16<td>F16
173 <tr><td>F32<td>F32<td>F32
174 </table>
175<tr>
176 <td rowspan="1">ArithmeticSubtraction
 177 <td rowspan="1" style="width:200px;"> Function to subtract 2 tensors.
178 <td rowspan="1">
179 <ul>
180 <li>ANEURALNETWORKS_SUB
181 </ul>
182 <td>NEArithmeticSubtraction
183 <td>
184 <ul>
185 <li>All
186 </ul>
187 <td>
188 <table>
189 <tr><th>src0<th>src1<th>dst
190 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
191 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
192 <tr><td>QSYMM16<td>QSYMM16<td>QASYMM16
193 <tr><td>QSYMM16<td>QSYMM16<td>S32
194 <tr><td>U8<td>U8<td>U8
195 <tr><td>U8<td>U8<td>S16
196 <tr><td>U8<td>S16<td>S16
197 <tr><td>S16<td>U8<td>S16
198 <tr><td>S16<td>S16<td>S16
199 <tr><td>S32<td>S32<td>S32
200 <tr><td>F16<td>F16<td>F16
201 <tr><td>F32<td>F32<td>F32
202 </table>
203<tr>
Teresa Charlin62687422021-04-28 10:58:49 +0100204 <td rowspan="2">BatchNormalizationLayer
205 <td rowspan="2" style="width:200px;"> Function to perform batch normalization.
206 <td rowspan="2">
207 <ul>
208 <li>n/a
209 </ul>
210 <td>NEBatchNormalizationLayer
211 <td>
212 <ul>
213 <li>NHWC
214 <li>NCHW
215 </ul>
216 <td>
217 <table>
218 <tr><th>src<th>dst
219 <tr><td>F32<td>F32
220 <tr><td>F16<td>F16
221 </table>
222<tr>
223 <td>CLBatchNormalizationLayer
224 <td>
225 <ul>
226 <li>NHWC
227 <li>NCHW
228 </ul>
229 <td>
230 <table>
231 <tr><th>src<th>dst
232 <tr><td>F32<td>F32
233 <tr><td>F16<td>F16
234 </table>
235<tr>
236 <td rowspan="2">BatchToSpaceLayer
237 <td rowspan="2" style="width:200px;"> Batch to space transformation.
238 <td rowspan="2">
239 <ul>
240 <li>ANEURALNETWORKS_BATCH_TO_SPACE_ND
241 </ul>
242 <td>NEBatchToSpaceLayer
243 <td>
244 <ul>
245 <li>NHWC
246 <li>NCHW
247 </ul>
248 <td>
249 <table>
250 <tr><th>src0<th>src1<th>dst
 251 <tr><td>All<td>S32<td>All
252 </table>
253<tr>
254 <td>CLBatchToSpaceLayer
255 <td>
256 <ul>
257 <li>NHWC
258 <li>NCHW
259 </ul>
260 <td>
261 <table>
262 <tr><th>src0<th>src1<th>dst
 263 <tr><td>All<td>S32<td>All
264 </table>
265<tr>
266 <td rowspan="2">BitwiseAnd
 267 <td rowspan="2" style="width:200px;"> Function to perform bitwise AND between 2 tensors.
268 <td rowspan="2">
269 <ul>
270 <li>ANEURALNETWORKS_LOGICAL_AND
271 </ul>
272 <td>NEBitwiseAnd
273 <td>
274 <ul>
275 <li>All
276 </ul>
277 <td>
278 <table>
279 <tr><th>src<th>dst
280 <tr><td>U8<td>U8
281 </table>
282<tr>
283 <td>CLBitwiseAnd
284 <td>
285 <ul>
286 <li>All
287 </ul>
288 <td>
289 <table>
290 <tr><th>src<th>dst
291 <tr><td>U8<td>U8
292 </table>
293<tr>
294 <td rowspan="2">BitwiseNot
 295 <td rowspan="2" style="width:200px;"> Function to perform bitwise NOT.
296 <td rowspan="2">
297 <ul>
298 <li>ANEURALNETWORKS_LOGICAL_NOT
299 </ul>
300 <td>NEBitwiseNot
301 <td>
302 <ul>
303 <li>All
304 </ul>
305 <td>
306 <table>
307 <tr><th>src<th>dst
308 <tr><td>U8<td>U8
309 </table>
310<tr>
311 <td>CLBitwiseNot
312 <td>
313 <ul>
314 <li>All
315 </ul>
316 <td>
317 <table>
318 <tr><th>src<th>dst
319 <tr><td>U8<td>U8
320 </table>
321<tr>
322 <td rowspan="2">BitwiseOr
 323 <td rowspan="2" style="width:200px;"> Function to perform bitwise OR between 2 tensors.
324 <td rowspan="2">
325 <ul>
326 <li>ANEURALNETWORKS_LOGICAL_OR
327 </ul>
328 <td>NEBitwiseOr
329 <td>
330 <ul>
331 <li>All
332 </ul>
333 <td>
334 <table>
335 <tr><th>src<th>dst
336 <tr><td>U8<td>U8
337 </table>
338<tr>
339 <td>CLBitwiseOr
340 <td>
341 <ul>
342 <li>All
343 </ul>
344 <td>
345 <table>
346 <tr><th>src<th>dst
347 <tr><td>U8<td>U8
348 </table>
349<tr>
350 <td rowspan="2">BitwiseXor
 351 <td rowspan="2" style="width:200px;"> Function to perform bitwise XOR between 2 tensors.
352 <td rowspan="2">
353 <ul>
354 <li>n/a
355 </ul>
356 <td>NEBitwiseXor
357 <td>
358 <ul>
359 <li>All
360 </ul>
361 <td>
362 <table>
363 <tr><th>src<th>dst
364 <tr><td>U8<td>U8
365 </table>
366<tr>
367 <td>CLBitwiseXor
368 <td>
369 <ul>
370 <li>All
371 </ul>
372 <td>
373 <table>
374 <tr><th>src<th>dst
375 <tr><td>U8<td>U8
376 </table>
377<tr>
378 <td rowspan="2">BoundingBoxTransform
379 <td rowspan="2" style="width:200px;"> Transform proposal bounding boxes to target bounding box using bounding box deltas.
380 <td rowspan="2">
381 <ul>
382 <li>n/a
383 </ul>
384 <td>NEBoundingBoxTransform
385 <td>
386 <ul>
387 <li>NHWC
388 <li>NCHW
389 </ul>
390 <td>
391 <table>
392 <tr><th>src0<th>src1<th>dst
393 <tr><td>QASYMM16<td>QASYMM8<td>QASYMM16
394 <tr><td>F16<td>F16<td>F16
395 <tr><td>F32<td>F32<td>F32
396 </table>
397<tr>
398 <td>CLBoundingBoxTransform
399 <td>
400 <ul>
401 <li>NHWC
402 <li>NCHW
403 </ul>
404 <td>
405 <table>
406 <tr><th>src0<th>src1<th>dst
407 <tr><td>QASYMM16<td>QASYMM8<td>QASYMM16
408 <tr><td>F16<td>F16<td>F16
409 <tr><td>F32<td>F32<td>F32
410 </table>
411<tr>
412 <td rowspan="2">Cast
413 <td rowspan="2" style="width:200px;"> Function to cast a tensor.
414 <td rowspan="2">
415 <ul>
416 <li>ANEURALNETWORKS_CAST
417 </ul>
418 <td>NECast
419 <td>
420 <ul>
421 <li>All
422 </ul>
423 <td>
424 <table>
425 <tr><th>src<th>dst
426 <tr><td>QASYMM8_SIGNED<td>S16, S32, F32, F16
427 <tr><td>QASYMM8<td>U16, S16, S32, F32, F16
428 <tr><td>U8<td>U16, S16, S32, F32, F16
429 <tr><td>U16<td>U8, U32
430 <tr><td>S16<td>QASYMM8_SIGNED, U8, S32
431 <tr><td>F16<td>QASYMM8_SIGNED, QASYMM8, F32, S32, U8
432 <tr><td>S32<td>QASYMM8_SIGNED, QASYMM8, F16, F32, U8
433 <tr><td>F32<td>QASYMM8_SIGNED, QASYMM8, BFLOAT16, F16, S32, U8
434 </table>
435<tr>
436 <td>CLCast
437 <td>
438 <ul>
439 <li>All
440 </ul>
441 <td>
442 <table>
443 <tr><th>src<th>dst
444 <tr><td>U8<td>S8, U16, S16, U32, S32, F16, F32
445 <tr><td>U16<td>U8, S8, S16, U32, S32, F16, F32
446 <tr><td>S16<td>U8, S8, U16, U32, S32, F16, F32
447 <tr><td>U32<td>U8, S8, U16, S16, S32, F16, F32
448 <tr><td>S32<td>U8, S8, U16, S16, U32, F16, F32
449 <tr><td>F16<td>U8, S8, U16, S16, U32, F32
450 <tr><td>F32<td>U8, S8, U16, S16, U32, F16
451 </table>
452<tr>
453 <td rowspan="2">ChannelShuffleLayer
454 <td rowspan="2" style="width:200px;"> Function to shuffle the channels of the input tensor.
455 <td rowspan="2">
456 <ul>
457 <li>ANEURALNETWORKS_CHANNEL_SHUFFLE
458 </ul>
459 <td>NEChannelShuffleLayer
460 <td>
461 <ul>
462 <li>NCHW
463 </ul>
464 <td>
465 <table>
466 <tr><th>src<th>dst
467 <tr><td>All<td>All
468 </table>
469<tr>
470 <td>CLChannelShuffleLayer
471 <td>
472 <ul>
473 <li>NCHW
474 </ul>
475 <td>
476 <table>
477 <tr><th>src<th>dst
478 <tr><td>All<td>All
479 </table>
480<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +0100481 <td rowspan="1">Comparison
482 <td rowspan="1" style="width:200px;"> Function to compare 2 tensors.
483 <td rowspan="1">
484 <ul>
485 <li>ANEURALNETWORKS_EQUAL
486 <li>ANEURALNETWORKS_GREATER
487 <li>ANEURALNETWORKS_GREATER_EQUAL
488 <li>ANEURALNETWORKS_LESS
489 <li>ANEURALNETWORKS_LESS_EQUAL
490 <li>ANEURALNETWORKS_NOT_EQUAL
491 </ul>
492 <td>CLComparison
493 <td>
494 <ul>
495 <li>All
496 </ul>
497 <td>
498 <table>
499 <tr><th>src0<th>src1<th>dst
500 <tr><td>All<td>All<td>U8
501 </table>
502<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100503 <td rowspan="2">ConcatenateLayer
504 <td rowspan="2" style="width:200px;"> Function to concatenate tensors along a given axis.
505 <td rowspan="2">
506 <ul>
507 <li>ANEURALNETWORKS_CONCATENATION
508 </ul>
509 <td>NEConcatenateLayer
510 <td>
511 <ul>
512 <li>All
513 </ul>
514 <td>
515 <table>
516 <tr><th>src<th>dst
517 <tr><td>QASYMM8<td>QASYMM8
518 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
519 <tr><td>F16<td>F16
520 <tr><td>F32<td>F32
521 </table>
522<tr>
523 <td>CLConcatenateLayer
524 <td>
525 <ul>
526 <li>All
527 </ul>
528 <td>
529 <table>
530 <tr><th>src<th>dst
531 <tr><td>QASYMM8<td>QASYMM8
532 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
533 <tr><td>F16<td>F16
534 <tr><td>F32<td>F32
535 </table>
536<tr>
537 <td rowspan="2">ConvertFullyConnectedWeights
 538 <td rowspan="2" style="width:200px;"> Function to transpose the weights for the fully connected layer.
539 <td rowspan="2">
540 <ul>
Teresa Charlin62687422021-04-28 10:58:49 +0100541 <li>n/a
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100542 </ul>
543 <td>NEConvertFullyConnectedWeights
544 <td>
545 <ul>
546 <li>NHWC
547 <li>NCHW
548 </ul>
549 <td>
550 <table>
551 <tr><th>src<th>dst
552 <tr><td>All<td>All
553 </table>
554<tr>
555 <td>CLConvertFullyConnectedWeights
556 <td>
557 <ul>
558 <li>NHWC
559 <li>NCHW
560 </ul>
561 <td>
562 <table>
563 <tr><th>src<th>dst
564 <tr><td>All<td>All
565 </table>
566<tr>
Teresa Charlin62687422021-04-28 10:58:49 +0100567 <td rowspan="2">ConvolutionLayer
568 <td rowspan="2" style="width:200px;"> Function to compute a convolution layer.
569 <td rowspan="2">
570 <ul>
571 <li>ANEURALNETWORKS_CONV_2D
572 </ul>
573 <td>NEConvolutionLayer
574 <td>
575 <ul>
576 <li>NHWC
577 <li>NCHW
578 </ul>
579 <td>
580 <table>
581 <tr><th>src0<th>src1<th>src2<th>dst
582 <tr><td>F16<td>F16<td>F16<td>F16
583 <tr><td>F32<td>F32<td>F32<td>F32
584 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
585 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
586 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
587 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
588 </table>
589<tr>
590 <td>CLConvolutionLayer
591 <td>
592 <ul>
593 <li>NHWC
594 <li>NCHW
595 </ul>
596 <td>
597 <table>
598 <tr><th>src0<th>src1<th>src2<th>dst
599 <tr><td>F16<td>F16<td>F16<td>F16
600 <tr><td>F32<td>F32<td>F32<td>F32
601 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
602 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
603 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
604 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
605 </table>
606<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100607 <td rowspan="2">Copy
608 <td rowspan="2" style="width:200px;"> Function to copy a tensor.
609 <td rowspan="2">
610 <ul>
Teresa Charlin62687422021-04-28 10:58:49 +0100611 <li>n/a
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100612 </ul>
613 <td>NECopy
614 <td>
615 <ul>
616 <li>All
617 </ul>
618 <td>
619 <table>
620 <tr><th>src<th>dst
621 <tr><td>All<td>All
622 </table>
623<tr>
624 <td>CLCopy
625 <td>
626 <ul>
627 <li>All
628 </ul>
629 <td>
630 <table>
631 <tr><th>src<th>dst
632 <tr><td>All<td>All
633 </table>
634<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +0100635 <td rowspan="1">Crop
636 <td rowspan="1" style="width:200px;"> Performs a copy of input tensor to the output tensor.
637 <td rowspan="1">
638 <ul>
639 <li>n/a
640 </ul>
641 <td>CLCrop
642 <td>
643 <ul>
644 <li>NHWC
645 </ul>
646 <td>
647 <table>
648 <tr><th>src<th>dst
649 <tr><td>All<td>F32
650 </table>
651<tr>
Teresa Charlin62687422021-04-28 10:58:49 +0100652 <td rowspan="2">CropResize
653 <td rowspan="2" style="width:200px;"> Function to perform cropping and resizing.
654 <td rowspan="2">
655 <ul>
656 <li>n/a
657 </ul>
658 <td>NECropResize
659 <td>
660 <ul>
661 <li>NHWC
662 </ul>
663 <td>
664 <table>
665 <tr><th>src0<th>src1<th>src2<th>dst
666 <tr><td>All<td>F32<td>F32<td>F32
667 </table>
668<tr>
669 <td>CLCropResize
670 <td>
671 <ul>
672 <li>NHWC
673 </ul>
674 <td>
675 <table>
676 <tr><th>src0<th>src1<th>src2<th>dst
677 <tr><td>All<td>F32<td>F32<td>F32
678 </table>
679<tr>
680 <td rowspan="2">DeconvolutionLayer
 681 <td rowspan="2" style="width:200px;"> Function to compute a deconvolution or transpose convolution.
682 <td rowspan="2">
683 <ul>
684 <li>ANEURALNETWORKS_TRANSPOSE_CONV_2D
685 </ul>
686 <td>NEDeconvolutionLayer
687 <td>
688 <ul>
689 <li>NHWC
690 <li>NCHW
691 </ul>
692 <td>
693 <table>
694 <tr><th>src0<th>src1<th>src2<th>dst
695 <tr><td>F16<td>F16<td>F16<td>F16
696 <tr><td>F32<td>F32<td>F32<td>F32
697 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
698 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
699 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
700 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
701 </table>
702<tr>
703 <td>CLDeconvolutionLayer
704 <td>
705 <ul>
706 <li>NHWC
707 <li>NCHW
708 </ul>
709 <td>
710 <table>
711 <tr><th>src0<th>src1<th>src2<th>dst
712 <tr><td>F16<td>F16<td>F16<td>F16
713 <tr><td>F32<td>F32<td>F32<td>F32
714 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
715 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
716 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
717 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
718 </table>
719<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +0100720 <td rowspan="1">DeconvolutionLayerUpsample
721 <td rowspan="1" style="width:200px;"> Function to execute deconvolution upsample on OpenCL.
722 <td rowspan="1">
723 <ul>
724 <li>ANEURALNETWORKS_TRANSPOSE_CONV_2D
725 </ul>
726 <td>CLDeconvolutionLayerUpsample
727 <td>
728 <ul>
729 <li>NHWC
730 <li>NCHW
731 </ul>
732 <td>
733 <table>
734 <tr><th>src<th>dst
735 <tr><td>All<td>All
736 </table>
737<tr>
Teresa Charlin62687422021-04-28 10:58:49 +0100738 <td rowspan="2">DepthConvertLayer
739 <td rowspan="2" style="width:200px;"> Performs a down-scaling depth conversion.
740 <td rowspan="2">
741 <ul>
742 <li>n/a
743 </ul>
744 <td>NEDepthConvertLayer
745 <td>
746 <ul>
747 <li>All
748 </ul>
749 <td>
750 <table>
751 <tr><th>src<th>dst
752 <tr><td>QASYMM8<td>F16, F32
753 <tr><td>U8<td>U16, S16, S32
754 <tr><td>U16<td>U8, U32
755 <tr><td>S16<td>U8, S32
756 <tr><td>BFLOAT16<td>F32
757 <tr><td>F16<td>QASYMM8, F32
758 <tr><td>F32<td>QASYMM8, F16, BFLOAT16
759 </table>
760<tr>
761 <td>CLDepthConvertLayer
762 <td>
763 <ul>
764 <li>All
765 </ul>
766 <td>
767 <table>
768 <tr><th>src<th>dst
769 <tr><td>U8<td>S8, U16, S16, U32, S32, F16, F32
770 <tr><td>U16<td>U8, S8, S16, U32, S32, F16, F32
771 <tr><td>S16<td>U8, S8, U16, U32, S32, F16, F32
772 <tr><td>U32<td>U8, S8, U16, S16, S32, F16, F32
773 <tr><td>S32<td>U8, S8, U16, S16, U32, F16, F32
774 <tr><td>F16<td>U8, S8, U16, S16, U32, F32
775 <tr><td>F32<td>U8, S8, U16, S16, U32, F16
776 </table>
777<tr>
778 <td rowspan="2">DepthToSpaceLayer
779 <td rowspan="2" style="width:200px;"> Depth to Space transformation.
780 <td rowspan="2">
781 <ul>
782 <li>ANEURALNETWORKS_DEPTH_TO_SPACE
783 </ul>
784 <td>NEDepthToSpaceLayer
785 <td>
786 <ul>
787 <li>NHWC
788 <li>NCHW
789 </ul>
790 <td>
791 <table>
792 <tr><th>src<th>dst
793 <tr><td>All<td>All
794 </table>
795<tr>
796 <td>CLDepthToSpaceLayer
797 <td>
798 <ul>
799 <li>NHWC
800 <li>NCHW
801 </ul>
802 <td>
803 <table>
804 <tr><th>src<th>dst
805 <tr><td>All<td>All
806 </table>
807<tr>
808 <td rowspan="2">DepthwiseConvolutionLayer
809 <td rowspan="2" style="width:200px;"> Function to perform depthwise separable convolution.
810 <td rowspan="2">
811 <ul>
812 <li>ANEURALNETWORKS_DEPTHWISE_CONV_2D
813 </ul>
814 <td>NEDepthwiseConvolutionLayer
815 <td>
816 <ul>
817 <li>NHWC
818 <li>NCHW
819 </ul>
820 <td>
821 <table>
822 <tr><th>src0<th>src1<th>src2<th>dst
823 <tr><td>F16<td>F16<td>F16<td>F16
824 <tr><td>F32<td>F32<td>F32<td>F32
825 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
826 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
827 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
828 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
829 </table>
830<tr>
831 <td>CLDepthwiseConvolutionLayer
832 <td>
833 <ul>
834 <li>NHWC
835 <li>NCHW
836 </ul>
837 <td>
838 <table>
839 <tr><th>src0<th>src1<th>src2<th>dst
840 <tr><td>F16<td>F16<td>F16<td>F16
841 <tr><td>F32<td>F32<td>F32<td>F32
842 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
843 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
844 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
845 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
846 </table>
847<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100848 <td rowspan="2">DequantizationLayer
Teresa Charlin62687422021-04-28 10:58:49 +0100849 <td rowspan="2" style="width:200px;"> Function to dequantize the values in a tensor.
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100850 <td rowspan="2">
851 <ul>
852 <li>ANEURALNETWORKS_DEQUANTIZE
853 </ul>
854 <td>NEDequantizationLayer
855 <td>
856 <ul>
857 <li>All
858 </ul>
859 <td>
860 <table>
861 <tr><th>src<th>dst
Teresa Charlin62687422021-04-28 10:58:49 +0100862 <tr><td>QASYMM8<td>F16, F32
863 <tr><td>QASYMM8_SIGNED<td>F16, F32
864 <tr><td>QSYMM8_PER_CHANNEL<td>F16, F32
865 <tr><td>QSYMM8<td>F16, F32
866 <tr><td>QSYMM16<td>F16, F32
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100867 </table>
868<tr>
869 <td>CLDequantizationLayer
870 <td>
871 <ul>
872 <li>All
873 </ul>
874 <td>
875 <table>
876 <tr><th>src<th>dst
Teresa Charlin62687422021-04-28 10:58:49 +0100877 <tr><td>QASYMM8<td>F16, F32
878 <tr><td>QASYMM8_SIGNED<td>F16, F32
879 <tr><td>QSYMM8_PER_CHANNEL<td>F16, F32
880 <tr><td>QSYMM8<td>F16, F32
881 <tr><td>QSYMM16<td>F16, F32
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100882 </table>
883<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +0100884 <td rowspan="1">DetectionPostProcessLayer
885 <td rowspan="1" style="width:200px;"> Function to generate the detection output based on center size encoded boxes, class prediction and anchors by doing non maximum suppression (NMS).
886 <td rowspan="1">
887 <ul>
888 <li>ANEURALNETWORKS_DETECTION_POSTPROCESSING
889 </ul>
890 <td>NEDetectionPostProcessLayer
891 <td>
892 <ul>
893 <li>All
894 </ul>
895 <td>
896 <table>
897 <tr><th>src0 - src2<th>dst0 - dst3
898 <tr><td>QASYMM8<td>F32
899 <tr><td>QASYMM8_SIGNED<td>F32
900 <tr><td>F32<td>F32
901 </table>
902<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100903 <td rowspan="2">DirectConvolutionLayer
Teresa Charlin62687422021-04-28 10:58:49 +0100904 <td rowspan="2" style="width:200px;"> Function to compute direct convolution.
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100905 <td rowspan="2">
906 <ul>
907 <li>ANEURALNETWORKS_CONV_2D
908 </ul>
909 <td>NEDirectConvolutionLayer
910 <td>
911 <ul>
912 <li>NHWC
913 <li>NCHW
914 </ul>
915 <td>
916 <table>
917 <tr><th>src0<th>src1<th>src2<th>dst
918 <tr><td>F16<td>F16<td>F16<td>F16
919 <tr><td>F32<td>F32<td>F32<td>F32
920 </table>
921<tr>
922 <td>CLDirectConvolutionLayer
923 <td>
924 <ul>
925 <li>NHWC
926 <li>NCHW
927 </ul>
928 <td>
929 <table>
930 <tr><th>src0<th>src1<th>src2<th>dst
931 <tr><td>F16<td>F16<td>F16<td>F16
932 <tr><td>F32<td>F32<td>F32<td>F32
933 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
934 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
935 </table>
936<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +0100937 <td rowspan="1">DirectDeconvolutionLayer
938 <td rowspan="1" style="width:200px;"> Function to run the deconvolution layer.
939 <td rowspan="1">
940 <ul>
941 <li>ANEURALNETWORKS_TRANSPOSE_CONV_2D
942 </ul>
943 <td>CLDirectDeconvolutionLayer
944 <td>
945 <ul>
946 <li>NHWC
947 <li>NCHW
948 </ul>
949 <td>
950 <table>
951 <tr><th>src0<th>src1<th>src2<th>dst
952 <tr><td>F16<td>F16<td>F16<td>F16
953 <tr><td>F32<td>F32<td>F32<td>F32
954 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
955 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
956 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
957 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
958 </table>
959<tr>
960 <td rowspan="13">ElementWiseOperations
961 <td rowspan="13" style="width:200px;"> Function to perform in Cpu: - Div - Max - Min - Pow - SquaredDiff - Comparisons (Equal, greater, greater_equal, less, less_equal, not_equal) Function to perform in CL: - Add - Sub - Div - Max - Min - Pow - SquaredDiff
962 <td rowspan="13">
963 <ul>
964 <li>ANEURALNETWORKS_MAXIMUM
965 <li>ANEURALNETWORKS_MINIMUM
966 <li>ANEURALNETWORKS_POW
967 <li>ANEURALNETWORKS_DIV
968 <li>ANEURALNETWORKS_ADD
969 <li>ANEURALNETWORKS_SUB
970 <li>ANEURALNETWORKS_EQUAL
971 <li>ANEURALNETWORKS_GREATER
972 <li>ANEURALNETWORKS_GREATER_EQUAL
973 <li>ANEURALNETWORKS_LESS
974 <li>ANEURALNETWORKS_LESS_EQUAL
975 <li>ANEURALNETWORKS_NOT_EQUAL
976 </ul>
977 <td>NEElementwiseMax
978 <td>
979 <ul>
980 <li>All
981 </ul>
982 <td>
983 <table>
984 <tr><th>src0<th>src1<th>dst
985 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
986 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
987 <tr><td>S32<td>S32<td>S32
988 <tr><td>S16<td>S16<td>S16
989 <tr><td>F16<td>F16<td>F16
990 <tr><td>F32<td>F32<td>F32
991 </table>
992<tr>
993 <td>NEElementwiseMin
994 <td>
995 <ul>
996 <li>All
997 </ul>
998 <td>
999 <table>
1000 <tr><th>src0<th>src1<th>dst
1001 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1002 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
1003 <tr><td>S32<td>S32<td>S32
1004 <tr><td>S16<td>S16<td>S16
1005 <tr><td>F16<td>F16<td>F16
1006 <tr><td>F32<td>F32<td>F32
1007 </table>
1008<tr>
1009 <td>NEElementwiseSquaredDiff
1010 <td>
1011 <ul>
1012 <li>All
1013 </ul>
1014 <td>
1015 <table>
1016 <tr><th>src0<th>src1<th>dst
1017 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1018 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
1019 <tr><td>S32<td>S32<td>S32
1020 <tr><td>S16<td>S16<td>S16
1021 <tr><td>F16<td>F16<td>F16
1022 <tr><td>F32<td>F32<td>F32
1023 </table>
1024<tr>
1025 <td>NEElementwiseDivision
1026 <td>
1027 <ul>
1028 <li>All
1029 </ul>
1030 <td>
1031 <table>
1032 <tr><th>src0<th>src1<th>dst
1033 <tr><td>F16<td>F16<td>F16
1034 <tr><td>F32<td>F32<td>F32
1035 </table>
1036<tr>
1037 <td>NEElementwisePower
1038 <td>
1039 <ul>
1040 <li>All
1041 </ul>
1042 <td>
1043 <table>
1044 <tr><th>src0<th>src1<th>dst
1045 <tr><td>F16<td>F16<td>F16
1046 <tr><td>F32<td>F32<td>F32
1047 </table>
1048<tr>
1049 <td>NEElementwiseComparison
1050 <td>
1051 <ul>
1052 <li>All
1053 </ul>
1054 <td>
1055 <table>
1056 <tr><th>src0<th>src1<th>dst
1057 <tr><td>QASYMM8<td>QASYMM8<td>U8
1058 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>U8
1059 <tr><td>S32<td>S32<td>U8
1060 <tr><td>U8<td>U8<td>U8
1061 <tr><td>S16<td>S16<td>U8
1062 <tr><td>F16<td>F16<td>U8
1063 <tr><td>F32<td>F32<td>U8
1064 </table>
1065<tr>
1066 <td>CLArithmeticAddition
1067 <td>
1068 <ul>
1069 <li>All
1070 </ul>
1071 <td>
1072 <table>
1073 <tr><th>src0<th>src1<th>dst
1074 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1075 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
1076 <tr><td>QSYMM16<td>QSYMM16<td>QASYMM16
1077 <tr><td>U8<td>U8<td>U8
1078 <tr><td>U8<td>U8<td>S16
1079 <tr><td>U8<td>S16<td>S16
1080 <tr><td>S16<td>U8<td>S16
1081 <tr><td>S16<td>S16<td>S16
1082 <tr><td>S32<td>S32<td>S32
1083 <tr><td>F16<td>F16<td>F16
1084 <tr><td>F32<td>F32<td>F32
1085 </table>
1086<tr>
1087 <td>CLArithmeticSubtraction
1088 <td>
1089 <ul>
1090 <li>All
1091 </ul>
1092 <td>
1093 <table>
1094 <tr><th>src0<th>src1<th>dst
1095 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1096 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
1097 <tr><td>QSYMM16<td>QSYMM16<td>QASYMM16
1098 <tr><td>U8<td>U8<td>U8
1099 <tr><td>U8<td>U8<td>S16
1100 <tr><td>U8<td>S16<td>S16
1101 <tr><td>S16<td>U8<td>S16
1102 <tr><td>S16<td>S16<td>S16
1103 <tr><td>S32<td>S32<td>S32
1104 <tr><td>F16<td>F16<td>F16
1105 <tr><td>F32<td>F32<td>F32
1106 </table>
1107<tr>
1108 <td>CLArithmeticDivision
1109 <td>
1110 <ul>
1111 <li>All
1112 </ul>
1113 <td>
1114 <table>
1115 <tr><th>src0<th>src1<th>dst
1116 <tr><td>F16<td>F16<td>F16
1117 <tr><td>F32<td>F32<td>F32
1118 </table>
1119<tr>
1120 <td>CLElementwiseMax
1121 <td>
1122 <ul>
1123 <li>All
1124 </ul>
1125 <td>
1126 <table>
1127 <tr><th>src0<th>src1<th>dst
1128 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1129 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
1130 <tr><td>QSYMM16<td>QSYMM16<td>QASYMM16
1131 <tr><td>U8<td>U8<td>U8
1132 <tr><td>S16<td>S16<td>S16
1133 <tr><td>S32<td>S32<td>S32
1134 <tr><td>U32<td>U32<td>U32
1135 <tr><td>F16<td>F16<td>F16
1136 <tr><td>F32<td>F32<td>F32
1137 </table>
1138<tr>
1139 <td>CLElementwiseMin
1140 <td>
1141 <ul>
1142 <li>All
1143 </ul>
1144 <td>
1145 <table>
1146 <tr><th>src0<th>src1<th>dst
1147 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1148 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
1149 <tr><td>QSYMM16<td>QSYMM16<td>QASYMM16
1150 <tr><td>U8<td>U8<td>U8
1151 <tr><td>S16<td>S16<td>S16
1152 <tr><td>S32<td>S32<td>S32
1153 <tr><td>U32<td>U32<td>U32
1154 <tr><td>F16<td>F16<td>F16
1155 <tr><td>F32<td>F32<td>F32
1156 </table>
1157<tr>
1158 <td>CLElementwiseSquaredDiff
1159 <td>
1160 <ul>
1161 <li>All
1162 </ul>
1163 <td>
1164 <table>
1165 <tr><th>src0<th>src1<th>dst
1166 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1167 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
1168 <tr><td>QSYMM16<td>QSYMM16<td>QASYMM16
1169 <tr><td>U8<td>U8<td>U8
1170 <tr><td>S16<td>S16<td>S16
1171 <tr><td>F16<td>F16<td>F16
1172 <tr><td>F32<td>F32<td>F32
1173 </table>
1174<tr>
1175 <td>CLElementwisePower
1176 <td>
1177 <ul>
1178 <li>All
1179 </ul>
1180 <td>
1181 <table>
1182 <tr><th>src0<th>src1<th>dst
1183 <tr><td>F16<td>F16<td>F16
1184 <tr><td>F32<td>F32<td>F32
1185 </table>
1186<tr>
1187 <td rowspan="8">ElementwiseUnaryLayer
1188 <td rowspan="8" style="width:200px;"> Function to perform: - Rsqrt - Exp - Neg - Log - Abs - Round - Sin
1189 <td rowspan="8">
1190 <ul>
1191 <li>ANEURALNETWORKS_ABS
1192 <li>ANEURALNETWORKS_EXP
1193 <li>ANEURALNETWORKS_LOG
1194 <li>ANEURALNETWORKS_NEG
1195 <li>ANEURALNETWORKS_RSQRT
1196 <li>ANEURALNETWORKS_SIN
1197 </ul>
1198 <td>NEElementwiseUnaryLayer
1199 <td>
1200 <ul>
1201 <li>All
1202 </ul>
1203 <td>
1204 <table>
1205 <tr><th>src<th>dst
1206 <tr><td>F16<td>F16
1207 <tr><td>F32<td>F32
1208 <tr><td>S32<td>S32
1209 </table>
1210<tr>
1211 <td>CLRsqrtLayer
1212 <td>
1213 <ul>
1214 <li>All
1215 </ul>
1216 <td>
1217 <table>
1218 <tr><th>src<th>dst
1219 <tr><td>F16<td>F16
1220 <tr><td>F32<td>F32
1221 </table>
1222<tr>
1223 <td>CLExpLayer
1224 <td>
1225 <ul>
1226 <li>All
1227 </ul>
1228 <td>
1229 <table>
1230 <tr><th>src<th>dst
1231 <tr><td>F16<td>F16
1232 <tr><td>F32<td>F32
1233 </table>
1234<tr>
1235 <td>CLNegLayer
1236 <td>
1237 <ul>
1238 <li>All
1239 </ul>
1240 <td>
1241 <table>
1242 <tr><th>src<th>dst
1243 <tr><td>F16<td>F16
1244 <tr><td>F32<td>F32
1245 </table>
1246<tr>
1247 <td>CLSinLayer
1248 <td>
1249 <ul>
1250 <li>All
1251 </ul>
1252 <td>
1253 <table>
1254 <tr><th>src<th>dst
1255 <tr><td>F16<td>F16
1256 <tr><td>F32<td>F32
1257 </table>
1258<tr>
1259 <td>CLLogLayer
1260 <td>
1261 <ul>
1262 <li>All
1263 </ul>
1264 <td>
1265 <table>
1266 <tr><th>src<th>dst
1267 <tr><td>F16<td>F16
1268 <tr><td>F32<td>F32
1269 </table>
1270<tr>
1271 <td>CLAbsLayer
1272 <td>
1273 <ul>
1274 <li>All
1275 </ul>
1276 <td>
1277 <table>
1278 <tr><th>src<th>dst
1279 <tr><td>F16<td>F16
1280 <tr><td>F32<td>F32
1281 </table>
1282<tr>
1283 <td>CLRoundLayer
1284 <td>
1285 <ul>
1286 <li>All
1287 </ul>
1288 <td>
1289 <table>
1290 <tr><th>src<th>dst
1291 <tr><td>F16<td>F16
1292 <tr><td>F32<td>F32
1293 </table>
1294<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001295 <td rowspan="2">FFT1D
Teresa Charlin62687422021-04-28 10:58:49 +01001296 <td rowspan="2" style="width:200px;"> Fast Fourier Transform 1D.
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001297 <td rowspan="2">
1298 <ul>
Teresa Charlin62687422021-04-28 10:58:49 +01001299 <li>n/a
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001300 </ul>
1301 <td>NEFFT1D
1302 <td>
1303 <ul>
1304 <li>All
1305 </ul>
1306 <td>
1307 <table>
1308 <tr><th>src<th>dst
1309 <tr><td>F32<td>F32
1310 </table>
1311<tr>
1312 <td>CLFFT1D
1313 <td>
1314 <ul>
1315 <li>All
1316 </ul>
1317 <td>
1318 <table>
1319 <tr><th>src<th>dst
1320 <tr><td>F32<td>F32
1321 <tr><td>F16<td>F16
1322 </table>
1323<tr>
1324 <td rowspan="2">FFT2D
Teresa Charlin62687422021-04-28 10:58:49 +01001325 <td rowspan="2" style="width:200px;"> Fast Fourier Transform 2D.
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001326 <td rowspan="2">
1327 <ul>
Teresa Charlin62687422021-04-28 10:58:49 +01001328 <li>n/a
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001329 </ul>
1330 <td>NEFFT2D
1331 <td>
1332 <ul>
1333 <li>All
1334 </ul>
1335 <td>
1336 <table>
1337 <tr><th>src<th>dst
1338 <tr><td>F32<td>F32
1339 </table>
1340<tr>
1341 <td>CLFFT2D
1342 <td>
1343 <ul>
1344 <li>All
1345 </ul>
1346 <td>
1347 <table>
1348 <tr><th>src<th>dst
1349 <tr><td>F32<td>F32
1350 <tr><td>F16<td>F16
1351 </table>
1352<tr>
1353 <td rowspan="2">FFTConvolutionLayer
Teresa Charlin62687422021-04-28 10:58:49 +01001354 <td rowspan="2" style="width:200px;"> Fast Fourier Transform Convolution.
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001355 <td rowspan="2">
1356 <ul>
1357 <li>ANEURALNETWORKS_CONV_2D
1358 </ul>
1359 <td>NEFFTConvolutionLayer
1360 <td>
1361 <ul>
1362 <li>All
1363 </ul>
1364 <td>
1365 <table>
1366 <tr><th>src<th>dst
1367 <tr><td>F32<td>F32
1368 </table>
1369<tr>
1370 <td>CLFFTConvolutionLayer
1371 <td>
1372 <ul>
1373 <li>All
1374 </ul>
1375 <td>
1376 <table>
1377 <tr><th>src<th>dst
1378 <tr><td>F32<td>F32
1379 <tr><td>F16<td>F16
1380 </table>
1381<tr>
1382 <td rowspan="2">Fill
Teresa Charlin62687422021-04-28 10:58:49 +01001383 <td rowspan="2" style="width:200px;"> Set the values of a tensor with a given value.
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001384 <td rowspan="2">
1385 <ul>
1386 <li>ANEURALNETWORKS_FILL
1387 </ul>
1388 <td>NEFill
1389 <td>
1390 <ul>
1391 <li>All
1392 </ul>
1393 <td>
1394 <table>
1395 <tr><th>src<th>dst
1396 <tr><td>All<td>All
1397 </table>
1398<tr>
1399 <td>CLFill
1400 <td>
1401 <ul>
1402 <li>All
1403 </ul>
1404 <td>
1405 <table>
1406 <tr><th>src<th>dst
1407 <tr><td>All<td>All
1408 </table>
1409<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01001410 <td rowspan="2">FillBorder
 1411 <td rowspan="2" style="width:200px;"> Function to fill the borders within the XY-plane.
1412 <td rowspan="2">
1413 <ul>
1414 <li>n/a
1415 </ul>
1416 <td>NEFillBorder
1417 <td>
1418 <ul>
1419 <li>All
1420 </ul>
1421 <td>
1422 <table>
1423 <tr><th>src<th>dst
1424 <tr><td>All<td>All
1425 </table>
1426<tr>
1427 <td>CLFillBorder
1428 <td>
1429 <ul>
1430 <li>All
1431 </ul>
1432 <td>
1433 <table>
1434 <tr><th>src<th>dst
1435 <tr><td>All<td>All
1436 </table>
1437<tr>
1438 <td rowspan="2">FlattenLayer
1439 <td rowspan="2" style="width:200px;"> Reshape a tensor to be 1D
1440 <td rowspan="2">
1441 <ul>
1442 <li>ANEURALNETWORKS_RESHAPE
1443 </ul>
1444 <td>NEFlattenLayer
1445 <td>
1446 <ul>
1447 <li>All
1448 </ul>
1449 <td>
1450 <table>
1451 <tr><th>src<th>dst
1452 <tr><td>All<td>All
1453 </table>
1454<tr>
1455 <td>CLFlattenLayer
1456 <td>
1457 <ul>
1458 <li>All
1459 </ul>
1460 <td>
1461 <table>
1462 <tr><th>src<th>dst
1463 <tr><td>All<td>All
1464 </table>
1465<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001466 <td rowspan="2">Floor
Teresa Charlin62687422021-04-28 10:58:49 +01001467 <td rowspan="2" style="width:200px;"> Round the value down to the nearest integer (floor).
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001468 <td rowspan="2">
1469 <ul>
1470 <li>ANEURALNETWORKS_FLOOR
1471 </ul>
1472 <td>NEFloor
1473 <td>
1474 <ul>
1475 <li>All
1476 </ul>
1477 <td>
1478 <table>
1479 <tr><th>src<th>dst
1480 <tr><td>F32<td>F32
1481 <tr><td>F16<td>F16
1482 </table>
1483<tr>
1484 <td>CLFloor
1485 <td>
1486 <ul>
1487 <li>All
1488 </ul>
1489 <td>
1490 <table>
1491 <tr><th>src<th>dst
1492 <tr><td>F32<td>F32
1493 <tr><td>F16<td>F16
1494 </table>
1495<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01001496 <td rowspan="2">FullyConnectedLayer
1497 <td rowspan="2" style="width:200px;"> Function to perform a fully connected / dense layer.
1498 <td rowspan="2">
1499 <ul>
1500 <li>ANEURALNETWORKS_FULLY_CONNECTED
1501 </ul>
Sheri Zhang6124ce62021-05-04 14:03:13 +01001502 <td>NEFullyConnectedLayer
Teresa Charlin62687422021-04-28 10:58:49 +01001503 <td>
1504 <ul>
1505 <li>NHWC
1506 <li>NCHW
1507 </ul>
1508 <td>
1509 <table>
1510 <tr><th>src0<th>src1<th>src2<th>dst
1511 <tr><td>F16<td>F16<td>F16<td>F16
1512 <tr><td>F32<td>F32<td>F32<td>F32
1513 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1514 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1515 </table>
1516<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +01001517 <td>CLFullyConnectedLayer
Teresa Charlin62687422021-04-28 10:58:49 +01001518 <td>
1519 <ul>
1520 <li>NHWC
1521 <li>NCHW
1522 </ul>
1523 <td>
1524 <table>
1525 <tr><th>src0<th>src1<th>src2<th>dst
1526 <tr><td>F16<td>F16<td>F16<td>F16
1527 <tr><td>F32<td>F32<td>F32<td>F32
1528 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1529 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1530 </table>
1531<tr>
1532 <td rowspan="2">FuseBatchNormalization
1533 <td rowspan="2" style="width:200px;"> Function to fuse the batch normalization node to a preceding convolution node.
1534 <td rowspan="2">
1535 <ul>
1536 <li>n/a
1537 </ul>
1538 <td>NEFuseBatchNormalization
1539 <td>
1540 <ul>
1541 <li>NHWC
1542 <li>NCHW
1543 </ul>
1544 <td>
1545 <table>
1546 <tr><th>src<th>dst
1547 <tr><td>F32<td>F32
1548 <tr><td>F16<td>F16
1549 </table>
1550<tr>
1551 <td>CLFuseBatchNormalization
1552 <td>
1553 <ul>
1554 <li>NHWC
1555 <li>NCHW
1556 </ul>
1557 <td>
1558 <table>
1559 <tr><th>src<th>dst
1560 <tr><td>F32<td>F32
1561 <tr><td>F16<td>F16
1562 </table>
1563<tr>
1564 <td rowspan="2">Gather
1565 <td rowspan="2" style="width:200px;"> Performs the Gather operation along the chosen axis.
1566 <td rowspan="2">
1567 <ul>
1568 <li>ANEURALNETWORKS_GATHER
1569 </ul>
1570 <td>NEGather
1571 <td>
1572 <ul>
1573 <li>All
1574 </ul>
1575 <td>
1576 <table>
1577 <tr><th>src<th>dst
1578 <tr><td>All<td>All
1579 </table>
1580<tr>
1581 <td>CLGather
1582 <td>
1583 <ul>
1584 <li>All
1585 </ul>
1586 <td>
1587 <table>
1588 <tr><th>src<th>dst
1589 <tr><td>All<td>All
1590 </table>
1591<tr>
1592 <td rowspan="2">GEMM
1593 <td rowspan="2" style="width:200px;"> General Matrix Multiplication.
1594 <td rowspan="2">
1595 <ul>
1596 <li>n/a
1597 </ul>
1598 <td>NEGEMM
1599 <td>
1600 <ul>
1601 <li>All
1602 </ul>
1603 <td>
1604 <table>
1605 <tr><th>src0<th>src1<th>src2<th>dst
1606 <tr><td>F32<td>F32<td>F32<td>F32
1607 <tr><td>F16<td>F16<td>F16<td>F16
1608 <tr><td>BFLOAT16<td>BFLOAT16<td>BFLOAT16<td>BFLOAT16
1609 </table>
1610<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +01001611 <td>CLGEMM
Teresa Charlin62687422021-04-28 10:58:49 +01001612 <td>
1613 <ul>
1614 <li>All
1615 </ul>
1616 <td>
1617 <table>
1618 <tr><th>src0<th>src1<th>src2<th>dst
1619 <tr><td>F32<td>F32<td>F32<td>F32
1620 <tr><td>F16<td>F16<td>F16<td>F16
1621 </table>
1622<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +01001623 <td rowspan="1">GEMMConv2D
1624 <td rowspan="1" style="width:200px;"> General Matrix Multiplication.
1625 <td rowspan="1">
1626 <ul>
1627 <li>ANEURALNETWORKS_CONV_2D
1628 </ul>
1629 <td>NEGEMMConv2d
1630 <td>
1631 <ul>
1632 <li>All
1633 </ul>
1634 <td>
1635 <table>
1636 <tr><th>src0<th>src1<th>src2<th>dst
1637 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1638 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1639 <tr><td>F16<td>F16<td>F16<td>F16
1640 <tr><td>F32<td>F32<td>F32<td>F32
1641 <tr><td>BFLOAT16<td>BFLOAT16<td>BFLOAT16<td>BFLOAT16
1642 </table>
1643<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01001644 <td rowspan="2">GEMMConvolutionLayer
1645 <td rowspan="2" style="width:200px;"> General Matrix Multiplication.
1646 <td rowspan="2">
1647 <ul>
1648 <li>ANEURALNETWORKS_CONV_2D
1649 </ul>
Sheri Zhang6124ce62021-05-04 14:03:13 +01001650 <td>NEGEMMConvolutionLayer
Teresa Charlin62687422021-04-28 10:58:49 +01001651 <td>
1652 <ul>
1653 <li>NHWC
1654 <li>NCHW
1655 </ul>
1656 <td>
1657 <table>
1658 <tr><th>src0<th>src1<th>src2<th>dst
1659 <tr><td>F16<td>F16<td>F16<td>F16
1660 <tr><td>F32<td>F32<td>F32<td>F32
1661 <tr><td>BFLOAT16<td>BFLOAT16<td>BFLOAT16<td>BFLOAT16
1662 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1663 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
1664 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1665 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
1666 </table>
1667<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +01001668 <td>CLGEMMConvolutionLayer
Teresa Charlin62687422021-04-28 10:58:49 +01001669 <td>
1670 <ul>
1671 <li>NHWC
1672 <li>NCHW
1673 </ul>
1674 <td>
1675 <table>
1676 <tr><th>src0<th>src1<th>src2<th>dst
1677 <tr><td>F16<td>F16<td>F16<td>F16
1678 <tr><td>F32<td>F32<td>F32<td>F32
1679 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1680 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
1681 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1682 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
1683 </table>
1684<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +01001685 <td rowspan="1">GEMMDeconvolutionLayer
1686 <td rowspan="1" style="width:200px;"> General Matrix Multiplication.
1687 <td rowspan="1">
1688 <ul>
1689 <li>ANEURALNETWORKS_TRANSPOSE_CONV_2D
1690 </ul>
1691 <td>CLGEMMDeconvolutionLayer
1692 <td>
1693 <ul>
1694 <li>NHWC
1695 </ul>
1696 <td>
1697 <table>
1698 <tr><th>src0<th>src1<th>src2<th>dst
1699 <tr><td>F16<td>F16<td>F16<td>F16
1700 <tr><td>F32<td>F32<td>F32<td>F32
1701 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1702 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1703 </table>
1704<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01001705 <td rowspan="2">GEMMLowpMatrixMultiplyCore
1706 <td rowspan="2" style="width:200px;"> General Matrix Multiplication.
1707 <td rowspan="2">
1708 <ul>
1709 <li>n/a
1710 </ul>
1711 <td>NEGEMMLowpMatrixMultiplyCore
1712 <td>
1713 <ul>
1714 <li>NHWC
1715 <li>NCHW
1716 </ul>
1717 <td>
1718 <table>
1719 <tr><th>src0<th>src1<th>src2<th>dst
1720 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1721 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
1722 <tr><td>QASYMM8<td>QSYMM8<td>S32<td>QASYMM8
1723 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>S32
1724 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>S32
1725 <tr><td>QASYMM8<td>QSYMM8<td>S32<td>S32
1726 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1727 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
1728 <tr><td>QASYMM8_SIGNED<td>QSYMM8<td>S32<td>QASYMM8_SIGNED
1729 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>S32
1730 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>S32
1731 <tr><td>QASYMM8_SIGNED<td>QSYMM8<td>S32<td>S32
1732 </table>
1733<tr>
1734 <td>CLGEMMLowpMatrixMultiplyCore
1735 <td>
1736 <ul>
1737 <li>NHWC
1738 <li>NCHW
1739 </ul>
1740 <td>
1741 <table>
1742 <tr><th>src0<th>src1<th>src2<th>dst
1743 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1744 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
1745 <tr><td>QASYMM8<td>QSYMM8<td>S32<td>QASYMM8
1746 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>S32
1747 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>S32
1748 <tr><td>QASYMM8<td>QSYMM8<td>S32<td>S32
1749 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1750 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
1751 <tr><td>QASYMM8_SIGNED<td>QSYMM8<td>S32<td>QASYMM8_SIGNED
1752 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>S32
1753 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>S32
1754 <tr><td>QASYMM8_SIGNED<td>QSYMM8<td>S32<td>S32
1755 </table>
1756<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +01001757 <td rowspan="2">GEMMLowpOutputStage
1758 <td rowspan="2" style="width:200px;"> General Matrix Multiplication.
1759 <td rowspan="2">
1760 <ul>
1761 <li>n/a
1762 </ul>
1763 <td>NEGEMMLowpOutputStage
1764 <td>
1765 <ul>
1766 <li>All
1767 </ul>
1768 <td>
1769 <table>
1770 <tr><th>src0<th>src1<th>dst
1771 <tr><td>S32<td>S32<td>QASYMM8
1772 <tr><td>S32<td>S32<td>QASYMM8_SIGNED
1773 <tr><td>S32<td>S32<td>QSYMM16
1774 </table>
1775<tr>
1776 <td>CLGEMMLowpOutputStage
1777 <td>
1778 <ul>
1779 <li>All
1780 </ul>
1781 <td>
1782 <table>
1783 <tr><th>src0<th>src1<th>dst
1784 <tr><td>S32<td>S32<td>QASYMM8
1785 <tr><td>S32<td>S32<td>QASYMM8_SIGNED
1786 <tr><td>S32<td>S32<td>QSYMM16
1787 </table>
1788<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01001789 <td rowspan="2">GenerateProposalsLayer
1790 <td rowspan="2" style="width:200px;"> Function to generate proposals for a RPN (Region Proposal Network).
1791 <td rowspan="2">
1792 <ul>
1793 <li>ANEURALNETWORKS_GENERATE_PROPOSALS
1794 </ul>
1795 <td>NEGenerateProposalsLayer
1796 <td>
1797 <ul>
1798 <li>All
1799 </ul>
1800 <td>
1801 <table>
1802 <tr><th>src0<th>src1<th>src2<th>dst
1803 <tr><td>F16<td>F16<td>F16<td>F16
1804 <tr><td>F32<td>F32<td>F32<td>F32
1805 <tr><td>QASYMM8<td>QSYMM8<td>QSYMM16<td>QASYMM8
1806 </table>
1807<tr>
1808 <td>CLGenerateProposalsLayer
1809 <td>
1810 <ul>
1811 <li>All
1812 </ul>
1813 <td>
1814 <table>
1815 <tr><th>src0<th>src1<th>src2<th>dst
1816 <tr><td>F16<td>F16<td>F16<td>F16
1817 <tr><td>F32<td>F32<td>F32<td>F32
1818 <tr><td>QASYMM8<td>QSYMM8<td>QSYMM16<td>QASYMM8
1819 </table>
1820<tr>
1821 <td rowspan="2">InstanceNormalizationLayer
 1822 <td rowspan="2" style="width:200px;"> Function to perform an Instance normalization on a given axis.
1823 <td rowspan="2">
1824 <ul>
1825 <li>ANEURALNETWORKS_INSTANCE_NORMALIZATION
1826 </ul>
1827 <td>NEInstanceNormalizationLayer
1828 <td>
1829 <ul>
1830 <li>NHWC
1831 <li>NCHW
1832 </ul>
1833 <td>
1834 <table>
1835 <tr><th>src<th>dst
1836 <tr><td>F16<td>F16
1837 <tr><td>F32<td>F32
1838 </table>
1839<tr>
1840 <td>CLInstanceNormalizationLayer
1841 <td>
1842 <ul>
1843 <li>NHWC
1844 <li>NCHW
1845 </ul>
1846 <td>
1847 <table>
1848 <tr><th>src<th>dst
1849 <tr><td>F16<td>F16
1850 <tr><td>F32<td>F32
1851 </table>
1852<tr>
1853 <td rowspan="2">L2NormalizeLayer
1854 <td rowspan="2" style="width:200px;"> Function to perform a L2 normalization on a given axis.
1855 <td rowspan="2">
1856 <ul>
1857 <li>ANEURALNETWORKS_L2_NORMALIZATION
1858 </ul>
1859 <td>NEL2NormalizeLayer
1860 <td>
1861 <ul>
1862 <li>NHWC
1863 <li>NCHW
1864 </ul>
1865 <td>
1866 <table>
1867 <tr><th>src<th>dst
1868 <tr><td>F16<td>F16
1869 <tr><td>F32<td>F32
1870 </table>
1871<tr>
1872 <td>CLL2NormalizeLayer
1873 <td>
1874 <ul>
1875 <li>NHWC
1876 <li>NCHW
1877 </ul>
1878 <td>
1879 <table>
1880 <tr><th>src<th>dst
1881 <tr><td>F16<td>F16
1882 <tr><td>F32<td>F32
1883 </table>
1884<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +01001885 <td rowspan="3">Logical
1886 <td rowspan="3" style="width:200px;"> Function to perform: - Logical AND - Logical OR - Logical NOT
1887 <td rowspan="3">
1888 <ul>
1889 <li>n/a
1890 </ul>
1891 <td>NELogicalAnd
1892 <td>
1893 <ul>
1894 <li>All
1895 </ul>
1896 <td>
1897 <table>
1898 <tr><th>src0<th>src1<th>dst
1899 <tr><td>U8<td>U8<td>U8
1900 </table>
1901<tr>
1902 <td>NELogicalOr
1903 <td>
1904 <ul>
1905 <li>All
1906 </ul>
1907 <td>
1908 <table>
1909 <tr><th>src0<th>src1<th>dst
1910 <tr><td>U8<td>U8<td>U8
1911 </table>
1912<tr>
1913 <td>NELogicalNot
1914 <td>
1915 <ul>
1916 <li>All
1917 </ul>
1918 <td>
1919 <table>
1920 <tr><th>src<th>dst
1921 <tr><td>U8<td>U8
1922 </table>
1923<tr>
1924 <td rowspan="1">LogicalAnd
1925 <td rowspan="1" style="width:200px;"> Function to perform Logical AND.
1926 <td rowspan="1">
1927 <ul>
1928 <li>n/a
1929 </ul>
1930 <td>CLLogicalAnd
1931 <td>
1932 <ul>
1933 <li>All
1934 </ul>
1935 <td>
1936 <table>
1937 <tr><th>src0<th>src1<th>dst
1938 <tr><td>U8<td>U8<td>U8
1939 </table>
1940<tr>
1941 <td rowspan="1">LogicalOr
1942 <td rowspan="1" style="width:200px;"> Function to perform Logical OR.
1943 <td rowspan="1">
1944 <ul>
1945 <li>n/a
1946 </ul>
1947 <td>CLLogicalOr
1948 <td>
1949 <ul>
1950 <li>All
1951 </ul>
1952 <td>
1953 <table>
1954 <tr><th>src0<th>src1<th>dst
1955 <tr><td>U8<td>U8<td>U8
1956 </table>
1957<tr>
1958 <td rowspan="1">LogicalNot
1959 <td rowspan="1" style="width:200px;"> Function to perform Logical NOT.
1960 <td rowspan="1">
1961 <ul>
1962 <li>n/a
1963 </ul>
1964 <td>CLLogicalNot
1965 <td>
1966 <ul>
1967 <li>All
1968 </ul>
1969 <td>
1970 <table>
1971 <tr><th>src<th>dst
1972 <tr><td>U8<td>U8
1973 </table>
1974<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01001975 <td rowspan="2">LSTMLayer
1976 <td rowspan="2" style="width:200px;"> Function to perform a single time step in a Long Short-Term Memory (LSTM) layer.
1977 <td rowspan="2">
1978 <ul>
1979 <li>ANEURALNETWORKS_LSTM
1980 </ul>
1981 <td>NELSTMLayer
1982 <td>
1983 <ul>
1984 <li>All
1985 </ul>
1986 <td>
1987 <table>
1988 <tr><th>src0 - src13<th>dst0 - dst3
1989 <tr><td>F16<td>F16
1990 <tr><td>F32<td>F32
1991 </table>
1992<tr>
1993 <td>CLLSTMLayer
1994 <td>
1995 <ul>
1996 <li>All
1997 </ul>
1998 <td>
1999 <table>
2000 <tr><th>src0 - src13<th>dst0 - dst3
2001 <tr><td>F16<td>F16
2002 <tr><td>F32<td>F32
2003 </table>
2004<tr>
2005 <td rowspan="2">LSTMLayerQuantized
 2006 <td rowspan="2" style="width:200px;"> Function to perform quantized LSTM (Long Short-Term Memory).
2007 <td rowspan="2">
2008 <ul>
2009 <li>ANEURALNETWORKS_QUANTIZED_LSTM
2010 <li>ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
2011 </ul>
2012 <td>NELSTMLayerQuantized
2013 <td>
2014 <ul>
2015 <li>All
2016 </ul>
2017 <td>
2018 <table>
2019 <tr><th>src0 - src8<th>src9 - src12<th>src13<th>src14<th>dst0<th>dst1
2020 <tr><td>QASYMM8<td>S32<td>QSYMM16<td>QASYMM8<td>QSYMM16<td>QASYMM8
2021 </table>
2022<tr>
2023 <td>CLLSTMLayerQuantized
2024 <td>
2025 <ul>
2026 <li>All
2027 </ul>
2028 <td>
2029 <table>
2030 <tr><th>src0 - src8<th>src9 - src12<th>src13<th>src14<th>dst0<th>dst1
2031 <tr><td>QASYMM8<td>S32<td>QSYMM16<td>QASYMM8<td>QSYMM16<td>QASYMM8
2032 </table>
2033<tr>
2034 <td rowspan="2">MaxUnpoolingLayer
2035 <td rowspan="2" style="width:200px;"> Function to perform MaxUnpooling.
2036 <td rowspan="2">
2037 <ul>
2038 <li>n/a
2039 </ul>
2040 <td>NEMaxUnpoolingLayer
2041 <td>
2042 <ul>
2043 <li>NHWC
2044 <li>NCHW
2045 </ul>
2046 <td>
2047 <table>
2048 <tr><th>src<th>dst
2049 <tr><td>QASYMM8<td>QASYMM8
2050 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2051 <tr><td>F16<td>F16
2052 <tr><td>F32<td>F32
2053 </table>
2054<tr>
2055 <td>CLMaxUnpoolingLayer
2056 <td>
2057 <ul>
2058 <li>NHWC
2059 <li>NCHW
2060 </ul>
2061 <td>
2062 <table>
2063 <tr><th>src<th>dst
2064 <tr><td>QASYMM8<td>QASYMM8
2065 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2066 <tr><td>F16<td>F16
2067 <tr><td>F32<td>F32
2068 </table>
2069<tr>
2070 <td rowspan="2">MeanStdDevNormalizationLayer
2071 <td rowspan="2" style="width:200px;"> Function to execute mean and standard deviation normalization.
2072 <td rowspan="2">
2073 <ul>
2074 <li>n/a
2075 </ul>
2076 <td>NEMeanStdDevNormalizationLayer
2077 <td>
2078 <ul>
2079 <li>NHWC
2080 <li>NCHW
2081 </ul>
2082 <td>
2083 <table>
2084 <tr><th>src<th>dst
2085 <tr><td>F32<td>F32
2086 <tr><td>F16<td>F16
2087 </table>
2088<tr>
2089 <td>CLMeanStdDevNormalizationLayer
2090 <td>
2091 <ul>
2092 <li>NHWC
2093 <li>NCHW
2094 </ul>
2095 <td>
2096 <table>
2097 <tr><th>src<th>dst
2098 <tr><td>F32<td>F32
2099 <tr><td>F16<td>F16
2100 </table>
2101<tr>
2102 <td rowspan="2">NormalizationLayer
2103 <td rowspan="2" style="width:200px;"> Function to compute normalization layer.
2104 <td rowspan="2">
2105 <ul>
2106 <li>ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION
2107 </ul>
2108 <td>NENormalizationLayer
2109 <td>
2110 <ul>
2111 <li>NHWC
2112 <li>NCHW
2113 </ul>
2114 <td>
2115 <table>
2116 <tr><th>src<th>dst
2117 <tr><td>F32<td>F32
2118 <tr><td>F16<td>F16
2119 </table>
2120<tr>
2121 <td>CLNormalizationLayer
2122 <td>
2123 <ul>
2124 <li>NHWC
2125 <li>NCHW
2126 </ul>
2127 <td>
2128 <table>
2129 <tr><th>src<th>dst
2130 <tr><td>F32<td>F32
2131 <tr><td>F16<td>F16
2132 </table>
2133<tr>
2134 <td rowspan="2">PadLayer
2135 <td rowspan="2" style="width:200px;"> Function to pad a tensor.
2136 <td rowspan="2">
2137 <ul>
2138 <li>ANEURALNETWORKS_PAD
2139 <li>ANEURALNETWORKS_PAD_V2
2140 </ul>
2141 <td>NEPadLayer
2142 <td>
2143 <ul>
2144 <li>NHWC
2145 <li>NCHW
2146 </ul>
2147 <td>
2148 <table>
2149 <tr><th>src<th>dst
2150 <tr><td>All<td>All
2151 </table>
2152<tr>
2153 <td>CLPadLayer
2154 <td>
2155 <ul>
2156 <li>NHWC
2157 <li>NCHW
2158 </ul>
2159 <td>
2160 <table>
2161 <tr><th>src<th>dst
2162 <tr><td>All<td>All
2163 </table>
2164<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002165 <td rowspan="2">Permute
2166 <td rowspan="2" style="width:200px;"> Function to transpose an ND tensor.
2167 <td rowspan="2">
2168 <ul>
2169 <li>ANEURALNETWORKS_TRANSPOSE
2170 </ul>
2171 <td>NEPermute
2172 <td>
2173 <ul>
2174 <li>NHWC
2175 <li>NCHW
2176 </ul>
2177 <td>
2178 <table>
2179 <tr><th>src<th>dst
2180 <tr><td>All<td>All
2181 </table>
2182<tr>
2183 <td>CLPermute
2184 <td>
2185 <ul>
2186 <li>NHWC
2187 <li>NCHW
2188 </ul>
2189 <td>
2190 <table>
2191 <tr><th>src<th>dst
2192 <tr><td>All<td>All
2193 </table>
2194<tr>
2195 <td rowspan="2">PixelWiseMultiplication
 2196 <td rowspan="2" style="width:200px;"> Function to perform a pixel-wise multiplication.
2197 <td rowspan="2">
2198 <ul>
2199 <li>ANEURALNETWORKS_MUL
2200 </ul>
2201 <td>NEPixelWiseMultiplication
2202 <td>
2203 <ul>
2204 <li>All
2205 </ul>
2206 <td>
2207 <table>
2208 <tr><th>src0<th>src1<th>dst
2209 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
2210 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2211 <tr><td>QSYMM16<td>QSYMM16<td>QASYMM16
2212 <tr><td>QSYMM16<td>QSYMM16<td>S32
2213 <tr><td>U8<td>U8<td>U8
2214 <tr><td>U8<td>U8<td>S16
2215 <tr><td>U8<td>S16<td>S16
2216 <tr><td>S16<td>U8<td>S16
2217 <tr><td>S16<td>S16<td>S16
2218 <tr><td>F16<td>F16<td>F16
2219 <tr><td>F32<td>S32<td>F32
2220 </table>
2221<tr>
2222 <td>CLPixelWiseMultiplication
2223 <td>
2224 <ul>
2225 <li>All
2226 </ul>
2227 <td>
2228 <table>
2229 <tr><th>src0<th>src1<th>dst
2230 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
2231 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2232 <tr><td>QSYMM16<td>QSYMM16<td>QASYMM16
2233 <tr><td>QSYMM16<td>QSYMM16<td>S32
2234 <tr><td>U8<td>U8<td>U8
2235 <tr><td>U8<td>U8<td>S16
2236 <tr><td>U8<td>S16<td>S16
2237 <tr><td>S16<td>U8<td>S16
2238 <tr><td>S16<td>S16<td>S16
2239 <tr><td>F16<td>F16<td>F16
2240 <tr><td>F32<td>S32<td>F32
2241 </table>
2242<tr>
2243 <td rowspan="2">PoolingLayer
 2244 <td rowspan="2" style="width:200px;"> Function to perform pooling with the specified pooling operation.
2245 <td rowspan="2">
2246 <ul>
2247 <li>ANEURALNETWORKS_AVERAGE_POOL_2D
2248 <li>ANEURALNETWORKS_L2_POOL_2D
2249 <li>ANEURALNETWORKS_MAX_POOL_2D
2250 </ul>
2251 <td>NEPoolingLayer
2252 <td>
2253 <ul>
2254 <li>NHWC
2255 <li>NCHW
2256 </ul>
2257 <td>
2258 <table>
2259 <tr><th>src<th>dst
2260 <tr><td>QASYMM8<td>QASYMM8
2261 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2262 <tr><td>F16<td>F16
2263 <tr><td>F32<td>F32
2264 </table>
2265<tr>
2266 <td>CLPoolingLayer
2267 <td>
2268 <ul>
2269 <li>NHWC
2270 <li>NCHW
2271 </ul>
2272 <td>
2273 <table>
2274 <tr><th>src<th>dst
2275 <tr><td>QASYMM8<td>QASYMM8
2276 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2277 <tr><td>F16<td>F16
2278 <tr><td>F32<td>F32
2279 </table>
2280<tr>
2281 <td rowspan="2">PReluLayer
2282 <td rowspan="2" style="width:200px;"> Function to compute the activation layer with the PRELU activation function.
2283 <td rowspan="2">
2284 <ul>
2285 <li>ANEURALNETWORKS_PRELU
2286 </ul>
2287 <td>NEPReluLayer
2288 <td>
2289 <ul>
2290 <li>All
2291 </ul>
2292 <td>
2293 <table>
2294 <tr><th>src<th>dst
2295 <tr><td>QASYMM8<td>QASYMM8
2296 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2297 <tr><td>F16<td>F16
2298 <tr><td>F32<td>F32
2299 </table>
2300<tr>
2301 <td>CLPReluLayer
2302 <td>
2303 <ul>
2304 <li>All
2305 </ul>
2306 <td>
2307 <table>
2308 <tr><th>src<th>dst
2309 <tr><td>QASYMM8<td>QASYMM8
2310 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2311 <tr><td>F16<td>F16
2312 <tr><td>F32<td>F32
2313 </table>
2314<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01002315 <td rowspan="2">PriorBoxLayer
Sheri Zhang6124ce62021-05-04 14:03:13 +01002316 <td rowspan="2" style="width:200px;"> Function to compute prior boxes and clip.
Teresa Charlin62687422021-04-28 10:58:49 +01002317 <td rowspan="2">
2318 <ul>
2319 <li>n/a
2320 </ul>
2321 <td>NEPriorBoxLayer
2322 <td>
2323 <ul>
2324 <li>NHWC
2325 <li>NCHW
2326 </ul>
2327 <td>
2328 <table>
2329 <tr><th>src0<th>src1<th>dst
2330 <tr><td>F32<td>F32<td>F32
2331 </table>
2332<tr>
2333 <td>CLPriorBoxLayer
2334 <td>
2335 <ul>
2336 <li>NHWC
2337 <li>NCHW
2338 </ul>
2339 <td>
2340 <table>
2341 <tr><th>src0<th>src1<th>dst
2342 <tr><td>F32<td>F32<td>F32
2343 </table>
2344<tr>
2345 <td rowspan="2">QLSTMLayer
2346 <td rowspan="2" style="width:200px;"> Function to perform quantized LSTM (Long Short-Term Memory).
2347 <td rowspan="2">
2348 <ul>
2349 <li>ANEURALNETWORKS_QUANTIZED_LSTM
2350 <li>ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
2351 </ul>
2352 <td>NEQLSTMLayer
2353 <td>
2354 <ul>
2355 <li>All
2356 </ul>
2357 <td>
2358 <table>
2359 <tr><th>src0<th>src1 - src6<th>src7 -src9<th>src10<th>src11<th>dst0<th>dst1 - dst2
2360 <tr><td>QASYMM8_SIGNED<td>QASYMM8<td>S32<td>QSYMM16<td>QASYMM8_SIGNED<td>QSYMM16<td>QASYMM8_SIGNED
2361 </table>
2362<tr>
2363 <td>CLQLSTMLayer
2364 <td>
2365 <ul>
2366 <li>All
2367 </ul>
2368 <td>
2369 <table>
2370 <tr><th>src0<th>src1 - src6<th>src7 -src9<th>src10<th>src11<th>dst0<th>dst1 - dst2
2371 <tr><td>QASYMM8_SIGNED<td>QASYMM8<td>S32<td>QSYMM16<td>QASYMM8_SIGNED<td>QSYMM16<td>QASYMM8_SIGNED
2372 </table>
2373<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002374 <td rowspan="2">QuantizationLayer
 2375 <td rowspan="2" style="width:200px;"> Function to perform a quantization layer.
2376 <td rowspan="2">
2377 <ul>
2378 <li>ANEURALNETWORKS_QUANTIZE
2379 </ul>
2380 <td>NEQuantizationLayer
2381 <td>
2382 <ul>
2383 <li>All
2384 </ul>
2385 <td>
2386 <table>
2387 <tr><th>src<th>dst
Teresa Charlin62687422021-04-28 10:58:49 +01002388 <tr><td>QASYMM8<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2389 <tr><td>QASYMM8_SIGNED<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2390 <tr><td>F16<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2391 <tr><td>F32<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002392 </table>
2393<tr>
2394 <td>CLQuantizationLayer
2395 <td>
2396 <ul>
2397 <li>All
2398 </ul>
2399 <td>
2400 <table>
2401 <tr><th>src<th>dst
Teresa Charlin62687422021-04-28 10:58:49 +01002402 <tr><td>QASYMM8<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2403 <tr><td>QASYMM8_SIGNED<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2404 <tr><td>F16<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2405 <tr><td>F32<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2406 </table>
2407<tr>
2408 <td rowspan="2">Range
 2409 <td rowspan="2" style="width:200px;"> Function to generate a sequence of numbers starting from START and extending by increments of 'STEP' up to but not including 'END'.
2410 <td rowspan="2">
2411 <ul>
2412 <li>n/a
2413 </ul>
2414 <td>NERange
2415 <td>
2416 <ul>
2417 <li>All
2418 </ul>
2419 <td>
2420 <table>
2421 <tr><th>dst
2422 <tr><td>U8
2423 <tr><td>S8
2424 <tr><td>U16
2425 <tr><td>S16
2426 <tr><td>U32
2427 <tr><td>S32
2428 <tr><td>F16
2429 <tr><td>F32
2430 </table>
2431<tr>
2432 <td>CLRange
2433 <td>
2434 <ul>
2435 <li>All
2436 </ul>
2437 <td>
2438 <table>
2439 <tr><th>dst
2440 <tr><td>U8
2441 <tr><td>S8
2442 <tr><td>QASYMM8
2443 <tr><td>U16
2444 <tr><td>S16
2445 <tr><td>U32
2446 <tr><td>S32
2447 <tr><td>F16
2448 <tr><td>F32
2449 </table>
2450<tr>
2451 <td rowspan="2">ReduceMean
 2452 <td rowspan="2" style="width:200px;"> Function to perform a reduce mean operation.
2453 <td rowspan="2">
2454 <ul>
2455 <li>ANEURALNETWORKS_MEAN
2456 </ul>
2457 <td>NEReduceMean
2458 <td>
2459 <ul>
2460 <li>All
2461 </ul>
2462 <td>
2463 <table>
2464 <tr><th>src<th>dst
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002465 <tr><td>QASYMM8<td>QASYMM8
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002466 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
Teresa Charlin62687422021-04-28 10:58:49 +01002467 <tr><td>F16<td>F16
2468 <tr><td>F32<td>F32
2469 </table>
2470<tr>
2471 <td>CLReduceMean
2472 <td>
2473 <ul>
2474 <li>All
2475 </ul>
2476 <td>
2477 <table>
2478 <tr><th>src<th>dst
2479 <tr><td>QASYMM8<td>QASYMM8
2480 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2481 <tr><td>F16<td>F16
2482 <tr><td>F32<td>F32
2483 </table>
2484<tr>
2485 <td rowspan="2">ReductionOperation
 2486 <td rowspan="2" style="width:200px;"> Function to perform a reduction with the following operations - ARG_IDX_MAX: Index of the max value - ARG_IDX_MIN: Index of the min value - MEAN_SUM: Mean of sum - PROD: Product - SUM_SQUARE: Sum of squares - SUM: Sum - MIN: Min - MAX: Max
2487 <td rowspan="2">
2488 <ul>
2489 <li>ANEURALNETWORKS_REDUCE_ALL
2490 <li>ANEURALNETWORKS_REDUCE_ANY
2491 <li>ANEURALNETWORKS_REDUCE_MAX
2492 <li>ANEURALNETWORKS_REDUCE_MIN
2493 <li>ANEURALNETWORKS_REDUCE_PROD
2494 <li>ANEURALNETWORKS_REDUCE_SUM
2495 </ul>
2496 <td>NEReductionOperation
2497 <td>
2498 <ul>
2499 <li>All
2500 </ul>
2501 <td>
2502 <table>
2503 <tr><th>src<th>dst
2504 <tr><td>QASYMM8<td>QASYMM8
2505 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2506 <tr><td>F16<td>F16
2507 <tr><td>F32<td>F32
2508 <tr><td>S32<td>S32
2509 </table>
2510<tr>
2511 <td>CLReductionOperation
2512 <td>
2513 <ul>
2514 <li>All
2515 </ul>
2516 <td>
2517 <table>
2518 <tr><th>src<th>dst
2519 <tr><td>QASYMM8<td>QASYMM8
2520 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2521 <tr><td>F16<td>F16
2522 <tr><td>F32<td>F32
2523 <tr><td>S32<td>S32
2524 </table>
2525<tr>
2526 <td rowspan="2">ReorgLayer
2527 <td rowspan="2" style="width:200px;"> Performs a reorganization layer of input tensor to the output tensor.
2528 <td rowspan="2">
2529 <ul>
2530 <li>n/a
2531 </ul>
2532 <td>NEReorgLayer
2533 <td>
2534 <ul>
2535 <li>NHWC
2536 <li>NCHW
2537 </ul>
2538 <td>
2539 <table>
2540 <tr><th>src<th>dst
2541 <tr><td>All<td>All
2542 </table>
2543<tr>
2544 <td>CLReorgLayer
2545 <td>
2546 <ul>
2547 <li>NHWC
2548 <li>NCHW
2549 </ul>
2550 <td>
2551 <table>
2552 <tr><th>src<th>dst
2553 <tr><td>All<td>All
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002554 </table>
2555<tr>
2556 <td rowspan="2">ReshapeLayer
Teresa Charlin62687422021-04-28 10:58:49 +01002557 <td rowspan="2" style="width:200px;"> Function to reshape a tensor.
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002558 <td rowspan="2">
2559 <ul>
2560 <li>ANEURALNETWORKS_RESHAPE
2561 <li>ANEURALNETWORKS_SQUEEZE
2562 </ul>
2563 <td>NEReshapeLayer
2564 <td>
2565 <ul>
2566 <li>All
2567 </ul>
2568 <td>
2569 <table>
2570 <tr><th>src<th>dst
2571 <tr><td>All<td>All
2572 </table>
2573<tr>
2574 <td>CLReshapeLayer
2575 <td>
2576 <ul>
2577 <li>All
2578 </ul>
2579 <td>
2580 <table>
2581 <tr><th>src<th>dst
2582 <tr><td>All<td>All
2583 </table>
2584<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01002585 <td rowspan="2">Reverse
2586 <td rowspan="2" style="width:200px;"> Function to reverse tensor according to axis.
2587 <td rowspan="2">
2588 <ul>
2589 <li>n/a
2590 </ul>
2591 <td>NEReverse
2592 <td>
2593 <ul>
2594 <li>All
2595 </ul>
2596 <td>
2597 <table>
2598 <tr><th>src0<th>src1<th>dst
2599 <tr><td>All<td>U32<td>All
2600 </table>
2601<tr>
2602 <td>CLReverse
2603 <td>
2604 <ul>
2605 <li>All
2606 </ul>
2607 <td>
2608 <table>
2609 <tr><th>src0<th>src1<th>dst
2610 <tr><td>All<td>U32<td>All
2611 </table>
2612<tr>
2613 <td rowspan="2">RNNLayer
2614 <td rowspan="2" style="width:200px;"> Function to perform recurrent neural network layer.
2615 <td rowspan="2">
2616 <ul>
2617 <li>ANEURALNETWORKS_RNN
2618 </ul>
2619 <td>NERNNLayer
2620 <td>
2621 <ul>
2622 <li>NHWC
2623 <li>NCHW
2624 </ul>
2625 <td>
2626 <table>
2627 <tr><th>src0<th>src1<th>src2<th>src3<th>dst0<th>dst1
2628 <tr><td>F16<td>F16<td>F16<td>F16<td>F16<td>F16
2629 <tr><td>F32<td>F32<td>F32<td>F32<td>F32<td>F32
2630 </table>
2631<tr>
2632 <td>CLRNNLayer
2633 <td>
2634 <ul>
2635 <li>NHWC
2636 <li>NCHW
2637 </ul>
2638 <td>
2639 <table>
2640 <tr><th>src0<th>src1<th>src2<th>src3<th>dst0<th>dst1
2641 <tr><td>F16<td>F16<td>F16<td>F16<td>F16<td>F16
2642 <tr><td>F32<td>F32<td>F32<td>F32<td>F32<td>F32
2643 </table>
2644<tr>
2645 <td rowspan="2">ROIAlignLayer
2646 <td rowspan="2" style="width:200px;"> Function to perform ROI alignment.
2647 <td rowspan="2">
2648 <ul>
2649 <li>ANEURALNETWORKS_ROI_ALIGN
2650 </ul>
2651 <td>NEROIAlignLayer
2652 <td>
2653 <ul>
2654 <li>All
2655 </ul>
2656 <td>
2657 <table>
2658 <tr><th>src0<th>src1<th>dst
2659 <tr><td>F16<td>F16<td>F16
2660 <tr><td>F32<td>F32<td>F32
2661 <tr><td>QASYMM8<td>QASYMM16<td>QASYMM8
2662 <tr><td>QASYMM8_SIGNED<td>QASYMM16<td>QASYMM8_SIGNED
2663 </table>
2664<tr>
2665 <td>CLROIAlignLayer
2666 <td>
2667 <ul>
2668 <li>All
2669 </ul>
2670 <td>
2671 <table>
2672 <tr><th>src0<th>src1<th>dst
2673 <tr><td>F16<td>F16<td>F16
2674 <tr><td>F32<td>F32<td>F32
2675 <tr><td>QASYMM8<td>QASYMM16<td>QASYMM8
2676 <tr><td>QASYMM8_SIGNED<td>QASYMM16<td>QASYMM8_SIGNED
2677 </table>
2678<tr>
2679 <td rowspan="2">ROIPoolingLayer
2680 <td rowspan="2" style="width:200px;"> Function to perform ROI pooling.
2681 <td rowspan="2">
2682 <ul>
2683 <li>ANEURALNETWORKS_ROI_POOLING
2684 </ul>
2685 <td>NEROIPoolingLayer
2686 <td>
2687 <ul>
2688 <li>All
2689 </ul>
2690 <td>
2691 <table>
2692 <tr><th>src0<th>src1<th>dst
2693 <tr><td>F32<td>U16<td>F32
2694 <tr><td>QASYMM8<td>U16<td>QASYMM8
2695 </table>
2696<tr>
2697 <td>CLROIPoolingLayer
2698 <td>
2699 <ul>
2700 <li>All
2701 </ul>
2702 <td>
2703 <table>
2704 <tr><th>src0<th>src1<th>dst
2705 <tr><td>F16<td>U16<td>F16
2706 <tr><td>F32<td>U16<td>F32
2707 <tr><td>QASYMM8<td>U16<td>QASYMM8
2708 </table>
2709<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002710 <td rowspan="2">Scale
Teresa Charlin62687422021-04-28 10:58:49 +01002711      <td rowspan="2" style="width:200px;"> Function to resize a tensor using one of the following interpolation methods: - Bilinear - Nearest neighbor
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002712 <td rowspan="2">
2713 <ul>
2714 <li>ANEURALNETWORKS_RESIZE_BILINEAR
2715 <li>ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR
2716 </ul>
2717 <td>NEScale
2718 <td>
2719 <ul>
2720 <li>NHWC
2721 <li>NCHW
2722 </ul>
2723 <td>
2724 <table>
2725 <tr><th>src<th>dst
2726 <tr><td>QASYMM8<td>QASYMM8
2727 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2728 <tr><td>F16<td>F16
2729 <tr><td>F32<td>F32
2730 <tr><td>U8<td>U8
2731 <tr><td>S16<td>S16
2732 </table>
2733<tr>
2734 <td>CLScale
2735 <td>
2736 <ul>
2737 <li>NHWC
2738 <li>NCHW
2739 </ul>
2740 <td>
2741 <table>
2742 <tr><th>src<th>dst
2743 <tr><td>QASYMM8<td>QASYMM8
2744 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2745 <tr><td>F16<td>F16
2746 <tr><td>F32<td>F32
2747 <tr><td>U8<td>U8
2748 <tr><td>S16<td>S16
2749 </table>
2750<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01002751 <td rowspan="2">Select
2752 <td rowspan="2" style="width:200px;"> Function to select values from 2 tensors depending on an input tensor of booleans.
2753 <td rowspan="2">
2754 <ul>
2755 <li>ANEURALNETWORKS_SELECT
2756 </ul>
2757 <td>NESelect
2758 <td>
2759 <ul>
2760 <li>All
2761 </ul>
2762 <td>
2763 <table>
2764 <tr><th>src0<th>src1<th>src2<th>dst
2765 <tr><td>U8<td>All<td>All<td>All
2766 </table>
2767<tr>
2768 <td>CLSelect
2769 <td>
2770 <ul>
2771 <li>All
2772 </ul>
2773 <td>
2774 <table>
2775 <tr><th>src0<th>src1<th>src2<th>dst
2776 <tr><td>U8<td>All<td>All<td>All
2777 </table>
2778<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002779 <td rowspan="2">Slice
2780 <td rowspan="2" style="width:200px;"> Function to perform tensor slicing.
2781 <td rowspan="2">
2782 <ul>
2783 <li>ANEURALNETWORKS_SLICE
2784 </ul>
2785 <td>NESlice
2786 <td>
2787 <ul>
2788 <li>All
2789 </ul>
2790 <td>
2791 <table>
2792 <tr><th>src<th>dst
2793 <tr><td>All<td>All
2794 </table>
2795<tr>
2796 <td>CLSlice
2797 <td>
2798 <ul>
2799 <li>All
2800 </ul>
2801 <td>
2802 <table>
2803 <tr><th>src<th>dst
2804 <tr><td>All<td>All
2805 </table>
2806<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +01002807 <td rowspan="2">SoftmaxLayer
2808 <td rowspan="2" style="width:200px;"> Function to compute a SoftmaxLayer and a Log SoftmaxLayer.
2809 <td rowspan="2">
2810 <ul>
2811 <li>ANEURALNETWORKS_LOG_SOFTMAX
2812 <li>ANEURALNETWORKS_SOFTMAX
2813 </ul>
2814 <td>NESoftmaxLayerGeneric
2815 <td>
2816 <ul>
2817 <li>All
2818 </ul>
2819 <td>
2820 <table>
2821 <tr><th>src<th>dst
2822 <tr><td>QASYMM8<td>QASYMM8
2823 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2824 <tr><td>F16<td>F16
2825 <tr><td>F32<td>F32
2826 </table>
2827<tr>
2828 <td>CLSoftmaxLayerGeneric
2829 <td>
2830 <ul>
2831 <li>All
2832 </ul>
2833 <td>
2834 <table>
2835 <tr><th>src<th>dst
2836 <tr><td>QASYMM8<td>QASYMM8
2837 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2838 <tr><td>F16<td>F16
2839 <tr><td>F32<td>F32
2840 </table>
2841<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01002842 <td rowspan="2">SpaceToBatchLayer
2843 <td rowspan="2" style="width:200px;"> Function to divide a tensor spatially.
2844 <td rowspan="2">
2845 <ul>
2846 <li>ANEURALNETWORKS_SPACE_TO_BATCH_ND
2847 </ul>
2848 <td>NESpaceToBatchLayer
2849 <td>
2850 <ul>
2851 <li>NHWC
2852 <li>NCHW
2853 </ul>
2854 <td>
2855 <table>
2856 <tr><th>src0<th>src1<th>src2<th>dst
2857 <tr><td>All<td>S32<td>S32<td>All
2858 </table>
2859<tr>
2860 <td>CLSpaceToBatchLayer
2861 <td>
2862 <ul>
2863 <li>NHWC
2864 <li>NCHW
2865 </ul>
2866 <td>
2867 <table>
2868 <tr><th>src0<th>src1<th>src2<th>dst
2869 <tr><td>All<td>S32<td>S32<td>All
2870 </table>
2871<tr>
2872 <td rowspan="2">SpaceToDepthLayer
2873 <td rowspan="2" style="width:200px;"> Function to rearrange blocks of spatial data into depth.
2874 <td rowspan="2">
2875 <ul>
2876 <li>ANEURALNETWORKS_SPACE_TO_DEPTH
2877 </ul>
2878 <td>NESpaceToDepthLayer
2879 <td>
2880 <ul>
2881 <li>NHWC
2882 <li>NCHW
2883 </ul>
2884 <td>
2885 <table>
2886 <tr><th>src<th>dst
2887 <tr><td>All<td>All
2888 </table>
2889<tr>
2890 <td>CLSpaceToDepthLayer
2891 <td>
2892 <ul>
2893 <li>NHWC
2894 <li>NCHW
2895 </ul>
2896 <td>
2897 <table>
2898 <tr><th>src<th>dst
2899 <tr><td>All<td>All
2900 </table>
2901<tr>
2902 <td rowspan="2">Split
2903 <td rowspan="2" style="width:200px;"> Function to split a tensor along a given axis.
2904 <td rowspan="2">
2905 <ul>
2906 <li>ANEURALNETWORKS_SPLIT
2907 </ul>
2908 <td>NESplit
2909 <td>
2910 <ul>
2911 <li>All
2912 </ul>
2913 <td>
2914 <table>
2915 <tr><th>src<th>dst
2916 <tr><td>All<td>All
2917 </table>
2918<tr>
2919 <td>CLSplit
2920 <td>
2921 <ul>
2922 <li>All
2923 </ul>
2924 <td>
2925 <table>
2926 <tr><th>src<th>dst
2927 <tr><td>All<td>All
2928 </table>
2929<tr>
2930 <td rowspan="2">StackLayer
2931 <td rowspan="2" style="width:200px;"> Function to stack tensors along an axis.
2932 <td rowspan="2">
2933 <ul>
2934 <li>n/a
2935 </ul>
2936 <td>NEStackLayer
2937 <td>
2938 <ul>
2939 <li>All
2940 </ul>
2941 <td>
2942 <table>
2943 <tr><th>src<th>dst
2944 <tr><td>All<td>All
2945 </table>
2946<tr>
2947 <td>CLStackLayer
2948 <td>
2949 <ul>
2950 <li>All
2951 </ul>
2952 <td>
2953 <table>
2954 <tr><th>src<th>dst
2955 <tr><td>All<td>All
2956 </table>
2957<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002958 <td rowspan="2">StridedSlice
2959 <td rowspan="2" style="width:200px;"> Function to extract a strided slice of a tensor.
2960 <td rowspan="2">
2961 <ul>
2962 <li>ANEURALNETWORKS_STRIDED_SLICE
2963 </ul>
2964 <td>NEStridedSlice
2965 <td>
2966 <ul>
2967 <li>All
2968 </ul>
2969 <td>
2970 <table>
2971 <tr><th>src<th>dst
2972 <tr><td>All<td>All
2973 </table>
2974<tr>
2975 <td>CLStridedSlice
2976 <td>
2977 <ul>
2978 <li>All
2979 </ul>
2980 <td>
2981 <table>
2982 <tr><th>src<th>dst
2983 <tr><td>All<td>All
2984 </table>
2985<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01002986 <td rowspan="2">Tile
2987 <td rowspan="2" style="width:200px;"> Function to construct a tensor by tiling a given tensor.
2988 <td rowspan="2">
2989 <ul>
2990 <li>ANEURALNETWORKS_TILE
2991 </ul>
2992 <td>NETile
2993 <td>
2994 <ul>
2995 <li>All
2996 </ul>
2997 <td>
2998 <table>
2999 <tr><th>src<th>dst
3000 <tr><td>All<td>All
3001 </table>
3002<tr>
3003 <td>CLTile
3004 <td>
3005 <ul>
3006 <li>All
3007 </ul>
3008 <td>
3009 <table>
3010 <tr><th>src<th>dst
3011 <tr><td>All<td>All
3012 </table>
3013<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +01003014 <td rowspan="2">Transpose
Teresa Charlin62687422021-04-28 10:58:49 +01003015 <td rowspan="2" style="width:200px;"> Function to transpose a 2D tensor.
Sheri Zhanga47dcc22021-04-22 14:41:12 +01003016 <td rowspan="2">
3017 <ul>
3018 <li>ANEURALNETWORKS_TRANSPOSE
3019 </ul>
3020 <td>NETranspose
3021 <td>
3022 <ul>
3023 <li>All
3024 </ul>
3025 <td>
3026 <table>
3027 <tr><th>src<th>dst
3028 <tr><td>All<td>All
3029 </table>
3030<tr>
3031 <td>CLTranspose
3032 <td>
3033 <ul>
3034 <li>All
3035 </ul>
3036 <td>
3037 <table>
3038 <tr><th>src<th>dst
3039 <tr><td>All<td>All
3040 </table>
Teresa Charlin62687422021-04-28 10:58:49 +01003041<tr>
3042 <td rowspan="2">Unstack
3043 <td rowspan="2" style="width:200px;"> Function to unpack a rank-R tensor into rank-(R-1) tensors.
3044 <td rowspan="2">
3045 <ul>
3046 <li>n/a
3047 </ul>
3048 <td>NEUnstack
3049 <td>
3050 <ul>
3051 <li>All
3052 </ul>
3053 <td>
3054 <table>
3055 <tr><th>src<th>dst
3056 <tr><td>All<td>All
3057 </table>
3058<tr>
3059 <td>CLUnstack
3060 <td>
3061 <ul>
3062 <li>All
3063 </ul>
3064 <td>
3065 <table>
3066 <tr><th>src<th>dst
3067 <tr><td>All<td>All
3068 </table>
3069<tr>
3070 <td rowspan="2">WinogradConvolutionLayer
3071 <td rowspan="2" style="width:200px;"> Function to do Winograd Convolution.
3072 <td rowspan="2">
3073 <ul>
3074 <li>ANEURALNETWORKS_CONV_2D
3075 </ul>
3076 <td>NEWinogradConvolutionLayer
3077 <td>
3078 <ul>
3079 <li>NHWC
3080 <li>NCHW
3081 </ul>
3082 <td>
3083 <table>
3084 <tr><th>src0<th>src1<th>src2<th>dst
3085 <tr><td>F16<td>F16<td>F16<td>F16
3086 <tr><td>F32<td>F32<td>F32<td>F32
3087 </table>
3088<tr>
3089 <td>CLWinogradConvolutionLayer
3090 <td>
3091 <ul>
3092 <li>NHWC
3093 <li>NCHW
3094 </ul>
3095 <td>
3096 <table>
3097 <tr><th>src0<th>src1<th>src2<th>dst
3098 <tr><td>F16<td>F16<td>F16<td>F16
3099 <tr><td>F32<td>F32<td>F32<td>F32
3100 </table>
Sheri Zhang6124ce62021-05-04 14:03:13 +01003101<tr>
3102 <td rowspan="1">WinogradInputTransform
3103      <td rowspan="1" style="width:200px;"> Function to perform the Winograd input transformation.
3104 <td rowspan="1">
3105 <ul>
3106 <li>n/a
3107 </ul>
3108 <td>CLWinogradInputTransform
3109 <td>
3110 <ul>
3111 <li>NHWC
3112 <li>NCHW
3113 </ul>
3114 <td>
3115 <table>
3116 <tr><th>src<th>dst
3117 <tr><td>F16<td>F16
3118 <tr><td>F32<td>F32
3119 </table>
Sheri Zhanga47dcc22021-04-22 14:41:12 +01003120</table>
3121
3122*/
3123} // namespace