blob: 05cc892d405f1d848f929dc39c722d5449b1f9bf [file] [log] [blame]
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001///
2/// Copyright (c) 2021 Arm Limited.
3///
4/// SPDX-License-Identifier: MIT
5///
6/// Permission is hereby granted, free of charge, to any person obtaining a copy
7/// of this software and associated documentation files (the "Software"), to
8/// deal in the Software without restriction, including without limitation the
9/// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10/// sell copies of the Software, and to permit persons to whom the Software is
11/// furnished to do so, subject to the following conditions:
12///
13/// The above copyright notice and this permission notice shall be included in all
14/// copies or substantial portions of the Software.
15///
16/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22/// SOFTWARE.
23///
24namespace arm_compute
25{
26/**
27@page operators_list Supported Operators
28
29@tableofcontents
30
31@section S9_1_operators_list Supported Operators
32
33Compute Library supports the operators listed in the table below.
34
35Compute Library supports a wide list of data-types; detailed information can be found directly in the documentation of each kernel/function.
36The main data-types that the Machine Learning functions support are the following:
37 <ul>
38 <li>BFLOAT16: 16-bit non-standard brain floating point
39 <li>QASYMM8: 8-bit unsigned asymmetric quantized
40 <li>QASYMM8_SIGNED: 8-bit signed asymmetric quantized
41 <li>QSYMM8_PER_CHANNEL: 8-bit signed symmetric quantized (Used for the weights)
42 <li>QSYMM8: 8-bit signed symmetric quantized
43 <li>QSYMM16: 16-bit signed symmetric quantized
44 <li>F32: 32-bit single precision floating point
45 <li>F16: 16-bit half precision floating point
46 <li>S32: 32-bit signed integer
47 <li>U8: 8-bit unsigned char
Jakub Sujakee301b32021-06-04 09:46:08 +010048 <li>All: Agnostic to any specific data type
Sheri Zhanga47dcc22021-04-22 14:41:12 +010049 </ul>
50
51Compute Library supports the following data layouts (fast changing dimension from right to left):
52 <ul>
53 <li>NHWC: The native layout of Compute Library that delivers the best performance where channels are in the fastest changing dimension
54 <li>NCHW: Legacy layout where width is in the fastest changing dimension
Jakub Sujakee301b32021-06-04 09:46:08 +010055 <li>All: Agnostic to any specific data layout
Sheri Zhanga47dcc22021-04-22 14:41:12 +010056 </ul>
57where N = batches, C = channels, H = height, W = width
58
59<table>
60<caption id="multi_row"></caption>
61<tr>
62 <th>Function
63 <th>Description
64 <th>Equivalent Android NNAPI Op
65 <th>Backends
66 <th>Data Layouts
67 <th>Data Types
68<tr>
69 <td rowspan="2">ActivationLayer
70 <td rowspan="2" style="width:200px;"> Function to simulate an activation layer with the specified activation function.
71 <td rowspan="2">
72 <ul>
73 <li>ANEURALNETWORKS_ELU
74 <li>ANEURALNETWORKS_HARD_SWISH
75 <li>ANEURALNETWORKS_LOGISTIC
76 <li>ANEURALNETWORKS_RELU
77 <li>ANEURALNETWORKS_RELU1
78 <li>ANEURALNETWORKS_RELU6
79 <li>ANEURALNETWORKS_TANH
80 </ul>
81 <td>NEActivationLayer
82 <td>
83 <ul>
84 <li>All
85 </ul>
86 <td>
87 <table>
88 <tr><th>src<th>dst
89 <tr><td>QASYMM8<td>QASYMM8
90 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
91 <tr><td>QSYMM16<td>QSYMM16
92 <tr><td>F16<td>F16
93 <tr><td>F32<td>F32
94 </table>
95<tr>
96 <td>CLActivationLayer
97 <td>
98 <ul>
99 <li>All
100 </ul>
101 <td>
102 <table>
103 <tr><th>src<th>dst
104 <tr><td>QASYMM8<td>QASYMM8
105 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
106 <tr><td>QSYMM16<td>QSYMM16
107 <tr><td>F16<td>F16
108 <tr><td>F32<td>F32
109 </table>
110<tr>
Teresa Charlin62687422021-04-28 10:58:49 +0100111 <td rowspan="2">ArgMinMaxLayer
112 <td rowspan="2" style="width:200px;"> Function to calculate the index of the minimum or maximum values in a tensor based on an axis.
113 <td rowspan="2">
114 <ul>
115 <li>ANEURALNETWORKS_ARGMAX
116 <li>ANEURALNETWORKS_ARGMIN
117 </ul>
118 <td>NEArgMinMaxLayer
119 <td>
120 <ul>
121 <li>All
122 </ul>
123 <td>
124 <table>
125 <tr><th>src<th>dst
126 <tr><td>QASYMM8<td>U32, S32
127 <tr><td>QASYMM8_SIGNED<td>U32, S32
128 <tr><td>S32<td>U32, S32
129 <tr><td>F16<td>U32, S32
130 <tr><td>F32<td>U32, S32
131 </table>
132<tr>
133 <td>CLArgMinMaxLayer
134 <td>
135 <ul>
136 <li>All
137 </ul>
138 <td>
139 <table>
140 <tr><th>src<th>dst
141 <tr><td>QASYMM8<td>U32, S32
142 <tr><td>QASYMM8_SIGNED<td>U32, S32
143 <tr><td>S32<td>U32, S32
144 <tr><td>F16<td>U32, S32
145 <tr><td>F32<td>U32, S32
146 </table>
147<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +0100148 <td rowspan="1">ArithmeticAddition
149 <td rowspan="1" style="width:200px;"> Function to add 2 tensors.
150 <td rowspan="1">
151 <ul>
152 <li>ANEURALNETWORKS_ADD
153 </ul>
154 <td>NEArithmeticAddition
155 <td>
156 <ul>
157 <li>All
158 </ul>
159 <td>
160 <table>
161 <tr><th>src0<th>src1<th>dst
162 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
163 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
164 <tr><td>QSYMM16<td>QSYMM16<td>QSYMM16
165 <tr><td>QSYMM16<td>QSYMM16<td>S32
166 <tr><td>U8<td>U8<td>U8
167 <tr><td>U8<td>U8<td>S16
168 <tr><td>U8<td>S16<td>S16
169 <tr><td>S16<td>U8<td>S16
170 <tr><td>S16<td>S16<td>S16
171 <tr><td>S32<td>S32<td>S32
172 <tr><td>F16<td>F16<td>F16
173 <tr><td>F32<td>F32<td>F32
174 </table>
175<tr>
176 <td rowspan="1">ArithmeticSubtraction
177 <td rowspan="1" style="width:200px;"> Function to subtract 2 tensors.
178 <td rowspan="1">
179 <ul>
180 <li>ANEURALNETWORKS_SUB
181 </ul>
182 <td>NEArithmeticSubtraction
183 <td>
184 <ul>
185 <li>All
186 </ul>
187 <td>
188 <table>
189 <tr><th>src0<th>src1<th>dst
190 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
191 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
192 <tr><td>QSYMM16<td>QSYMM16<td>QSYMM16
193 <tr><td>QSYMM16<td>QSYMM16<td>S32
194 <tr><td>U8<td>U8<td>U8
195 <tr><td>U8<td>U8<td>S16
196 <tr><td>U8<td>S16<td>S16
197 <tr><td>S16<td>U8<td>S16
198 <tr><td>S16<td>S16<td>S16
199 <tr><td>S32<td>S32<td>S32
200 <tr><td>F16<td>F16<td>F16
201 <tr><td>F32<td>F32<td>F32
202 </table>
203<tr>
Teresa Charlin62687422021-04-28 10:58:49 +0100204 <td rowspan="2">BatchNormalizationLayer
205 <td rowspan="2" style="width:200px;"> Function to perform batch normalization.
206 <td rowspan="2">
207 <ul>
208 <li>n/a
209 </ul>
210 <td>NEBatchNormalizationLayer
211 <td>
212 <ul>
213 <li>NHWC
214 <li>NCHW
215 </ul>
216 <td>
217 <table>
218 <tr><th>src<th>dst
219 <tr><td>F32<td>F32
220 <tr><td>F16<td>F16
221 </table>
222<tr>
223 <td>CLBatchNormalizationLayer
224 <td>
225 <ul>
226 <li>NHWC
227 <li>NCHW
228 </ul>
229 <td>
230 <table>
231 <tr><th>src<th>dst
232 <tr><td>F32<td>F32
233 <tr><td>F16<td>F16
234 </table>
235<tr>
236 <td rowspan="2">BatchToSpaceLayer
237 <td rowspan="2" style="width:200px;"> Batch to space transformation.
238 <td rowspan="2">
239 <ul>
240 <li>ANEURALNETWORKS_BATCH_TO_SPACE_ND
241 </ul>
242 <td>NEBatchToSpaceLayer
243 <td>
244 <ul>
245 <li>NHWC
246 <li>NCHW
247 </ul>
248 <td>
249 <table>
250 <tr><th>src0<th>src1<th>dst
251 <tr><td>All<td>S32<td>All
252 </table>
253<tr>
254 <td>CLBatchToSpaceLayer
255 <td>
256 <ul>
257 <li>NHWC
258 <li>NCHW
259 </ul>
260 <td>
261 <table>
262 <tr><th>src0<th>src1<th>dst
263 <tr><td>All<td>S32<td>All
264 </table>
265<tr>
266 <td rowspan="2">BitwiseAnd
Jakub Sujakee301b32021-06-04 09:46:08 +0100267 <td rowspan="2" style="width:200px;"> Function to perform bitwise AND between 2 tensors.
Teresa Charlin62687422021-04-28 10:58:49 +0100268 <td rowspan="2">
269 <ul>
270 <li>ANEURALNETWORKS_LOGICAL_AND
271 </ul>
272 <td>NEBitwiseAnd
273 <td>
274 <ul>
275 <li>All
276 </ul>
277 <td>
278 <table>
279 <tr><th>src<th>dst
280 <tr><td>U8<td>U8
281 </table>
282<tr>
283 <td>CLBitwiseAnd
284 <td>
285 <ul>
286 <li>All
287 </ul>
288 <td>
289 <table>
290 <tr><th>src<th>dst
291 <tr><td>U8<td>U8
292 </table>
293<tr>
294 <td rowspan="2">BitwiseNot
Jakub Sujakee301b32021-06-04 09:46:08 +0100295 <td rowspan="2" style="width:200px;"> Function to perform bitwise NOT.
Teresa Charlin62687422021-04-28 10:58:49 +0100296 <td rowspan="2">
297 <ul>
298 <li>ANEURALNETWORKS_LOGICAL_NOT
299 </ul>
300 <td>NEBitwiseNot
301 <td>
302 <ul>
303 <li>All
304 </ul>
305 <td>
306 <table>
307 <tr><th>src<th>dst
308 <tr><td>U8<td>U8
309 </table>
310<tr>
311 <td>CLBitwiseNot
312 <td>
313 <ul>
314 <li>All
315 </ul>
316 <td>
317 <table>
318 <tr><th>src<th>dst
319 <tr><td>U8<td>U8
320 </table>
321<tr>
322 <td rowspan="2">BitwiseOr
Jakub Sujakee301b32021-06-04 09:46:08 +0100323 <td rowspan="2" style="width:200px;"> Function to perform bitwise OR between 2 tensors.
Teresa Charlin62687422021-04-28 10:58:49 +0100324 <td rowspan="2">
325 <ul>
326 <li>ANEURALNETWORKS_LOGICAL_OR
327 </ul>
328 <td>NEBitwiseOr
329 <td>
330 <ul>
331 <li>All
332 </ul>
333 <td>
334 <table>
335 <tr><th>src<th>dst
336 <tr><td>U8<td>U8
337 </table>
338<tr>
339 <td>CLBitwiseOr
340 <td>
341 <ul>
342 <li>All
343 </ul>
344 <td>
345 <table>
346 <tr><th>src<th>dst
347 <tr><td>U8<td>U8
348 </table>
349<tr>
350 <td rowspan="2">BitwiseXor
Jakub Sujakee301b32021-06-04 09:46:08 +0100351 <td rowspan="2" style="width:200px;"> Function to perform bitwise XOR between 2 tensors.
Teresa Charlin62687422021-04-28 10:58:49 +0100352 <td rowspan="2">
353 <ul>
354 <li>n/a
355 </ul>
356 <td>NEBitwiseXor
357 <td>
358 <ul>
359 <li>All
360 </ul>
361 <td>
362 <table>
363 <tr><th>src<th>dst
364 <tr><td>U8<td>U8
365 </table>
366<tr>
367 <td>CLBitwiseXor
368 <td>
369 <ul>
370 <li>All
371 </ul>
372 <td>
373 <table>
374 <tr><th>src<th>dst
375 <tr><td>U8<td>U8
376 </table>
377<tr>
378 <td rowspan="2">BoundingBoxTransform
379 <td rowspan="2" style="width:200px;"> Transform proposal bounding boxes to target bounding box using bounding box deltas.
380 <td rowspan="2">
381 <ul>
382 <li>n/a
383 </ul>
384 <td>NEBoundingBoxTransform
385 <td>
386 <ul>
387 <li>NHWC
388 <li>NCHW
389 </ul>
390 <td>
391 <table>
392 <tr><th>src0<th>src1<th>dst
393 <tr><td>QASYMM16<td>QASYMM8<td>QASYMM16
394 <tr><td>F16<td>F16<td>F16
395 <tr><td>F32<td>F32<td>F32
396 </table>
397<tr>
398 <td>CLBoundingBoxTransform
399 <td>
400 <ul>
401 <li>NHWC
402 <li>NCHW
403 </ul>
404 <td>
405 <table>
406 <tr><th>src0<th>src1<th>dst
407 <tr><td>QASYMM16<td>QASYMM8<td>QASYMM16
408 <tr><td>F16<td>F16<td>F16
409 <tr><td>F32<td>F32<td>F32
410 </table>
411<tr>
412 <td rowspan="2">Cast
413 <td rowspan="2" style="width:200px;"> Function to cast a tensor.
414 <td rowspan="2">
415 <ul>
416 <li>ANEURALNETWORKS_CAST
417 </ul>
418 <td>NECast
419 <td>
420 <ul>
421 <li>All
422 </ul>
423 <td>
424 <table>
425 <tr><th>src<th>dst
426 <tr><td>QASYMM8_SIGNED<td>S16, S32, F32, F16
427 <tr><td>QASYMM8<td>U16, S16, S32, F32, F16
428 <tr><td>U8<td>U16, S16, S32, F32, F16
429 <tr><td>U16<td>U8, U32
430 <tr><td>S16<td>QASYMM8_SIGNED, U8, S32
431 <tr><td>F16<td>QASYMM8_SIGNED, QASYMM8, F32, S32, U8
432 <tr><td>S32<td>QASYMM8_SIGNED, QASYMM8, F16, F32, U8
433 <tr><td>F32<td>QASYMM8_SIGNED, QASYMM8, BFLOAT16, F16, S32, U8
434 </table>
435<tr>
436 <td>CLCast
437 <td>
438 <ul>
439 <li>All
440 </ul>
441 <td>
442 <table>
443 <tr><th>src<th>dst
444 <tr><td>U8<td>S8, U16, S16, U32, S32, F16, F32
445 <tr><td>U16<td>U8, S8, S16, U32, S32, F16, F32
446 <tr><td>S16<td>U8, S8, U16, U32, S32, F16, F32
447 <tr><td>U32<td>U8, S8, U16, S16, S32, F16, F32
448 <tr><td>S32<td>U8, S8, U16, S16, U32, F16, F32
449 <tr><td>F16<td>U8, S8, U16, S16, U32, F32
450 <tr><td>F32<td>U8, S8, U16, S16, U32, F16
451 </table>
452<tr>
453 <td rowspan="2">ChannelShuffleLayer
454 <td rowspan="2" style="width:200px;"> Function to shuffle the channels of the input tensor.
455 <td rowspan="2">
456 <ul>
457 <li>ANEURALNETWORKS_CHANNEL_SHUFFLE
458 </ul>
459 <td>NEChannelShuffleLayer
460 <td>
461 <ul>
462 <li>NCHW
463 </ul>
464 <td>
465 <table>
466 <tr><th>src<th>dst
467 <tr><td>All<td>All
468 </table>
469<tr>
470 <td>CLChannelShuffleLayer
471 <td>
472 <ul>
473 <li>NCHW
474 </ul>
475 <td>
476 <table>
477 <tr><th>src<th>dst
478 <tr><td>All<td>All
479 </table>
480<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +0100481 <td rowspan="1">Comparison
482 <td rowspan="1" style="width:200px;"> Function to compare 2 tensors.
483 <td rowspan="1">
484 <ul>
485 <li>ANEURALNETWORKS_EQUAL
486 <li>ANEURALNETWORKS_GREATER
487 <li>ANEURALNETWORKS_GREATER_EQUAL
488 <li>ANEURALNETWORKS_LESS
489 <li>ANEURALNETWORKS_LESS_EQUAL
490 <li>ANEURALNETWORKS_NOT_EQUAL
491 </ul>
492 <td>CLComparison
493 <td>
494 <ul>
495 <li>All
496 </ul>
497 <td>
498 <table>
499 <tr><th>src0<th>src1<th>dst
500 <tr><td>All<td>All<td>U8
501 </table>
502<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100503 <td rowspan="2">ConcatenateLayer
504 <td rowspan="2" style="width:200px;"> Function to concatenate tensors along a given axis.
505 <td rowspan="2">
506 <ul>
507 <li>ANEURALNETWORKS_CONCATENATION
508 </ul>
509 <td>NEConcatenateLayer
510 <td>
511 <ul>
512 <li>All
513 </ul>
514 <td>
515 <table>
516 <tr><th>src<th>dst
517 <tr><td>QASYMM8<td>QASYMM8
518 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
519 <tr><td>F16<td>F16
520 <tr><td>F32<td>F32
521 </table>
522<tr>
523 <td>CLConcatenateLayer
524 <td>
525 <ul>
526 <li>All
527 </ul>
528 <td>
529 <table>
530 <tr><th>src<th>dst
531 <tr><td>QASYMM8<td>QASYMM8
532 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
533 <tr><td>F16<td>F16
534 <tr><td>F32<td>F32
535 </table>
536<tr>
537 <td rowspan="2">ConvertFullyConnectedWeights
Jakub Sujakee301b32021-06-04 09:46:08 +0100538 <td rowspan="2" style="width:200px;"> Function to transpose the weights for the fully connected layer.
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100539 <td rowspan="2">
540 <ul>
Teresa Charlin62687422021-04-28 10:58:49 +0100541 <li>n/a
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100542 </ul>
543 <td>NEConvertFullyConnectedWeights
544 <td>
545 <ul>
546 <li>NHWC
547 <li>NCHW
548 </ul>
549 <td>
550 <table>
551 <tr><th>src<th>dst
552 <tr><td>All<td>All
553 </table>
554<tr>
555 <td>CLConvertFullyConnectedWeights
556 <td>
557 <ul>
558 <li>NHWC
559 <li>NCHW
560 </ul>
561 <td>
562 <table>
563 <tr><th>src<th>dst
564 <tr><td>All<td>All
565 </table>
566<tr>
Teresa Charlin62687422021-04-28 10:58:49 +0100567 <td rowspan="2">ConvolutionLayer
568 <td rowspan="2" style="width:200px;"> Function to compute a convolution layer.
569 <td rowspan="2">
570 <ul>
571 <li>ANEURALNETWORKS_CONV_2D
572 </ul>
573 <td>NEConvolutionLayer
574 <td>
575 <ul>
576 <li>NHWC
577 <li>NCHW
578 </ul>
579 <td>
580 <table>
581 <tr><th>src0<th>src1<th>src2<th>dst
582 <tr><td>F16<td>F16<td>F16<td>F16
583 <tr><td>F32<td>F32<td>F32<td>F32
584 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
585 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
586 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
587 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
588 </table>
589<tr>
590 <td>CLConvolutionLayer
591 <td>
592 <ul>
593 <li>NHWC
594 <li>NCHW
595 </ul>
596 <td>
597 <table>
598 <tr><th>src0<th>src1<th>src2<th>dst
599 <tr><td>F16<td>F16<td>F16<td>F16
600 <tr><td>F32<td>F32<td>F32<td>F32
601 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
602 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
603 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
604 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
605 </table>
606<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100607 <td rowspan="2">Copy
608 <td rowspan="2" style="width:200px;"> Function to copy a tensor.
609 <td rowspan="2">
610 <ul>
Teresa Charlin62687422021-04-28 10:58:49 +0100611 <li>n/a
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100612 </ul>
613 <td>NECopy
614 <td>
615 <ul>
616 <li>All
617 </ul>
618 <td>
619 <table>
620 <tr><th>src<th>dst
621 <tr><td>All<td>All
622 </table>
623<tr>
624 <td>CLCopy
625 <td>
626 <ul>
627 <li>All
628 </ul>
629 <td>
630 <table>
631 <tr><th>src<th>dst
632 <tr><td>All<td>All
633 </table>
634<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +0100635 <td rowspan="1">Crop
636 <td rowspan="1" style="width:200px;"> Performs a copy of input tensor to the output tensor.
637 <td rowspan="1">
638 <ul>
639 <li>n/a
640 </ul>
641 <td>CLCrop
642 <td>
643 <ul>
644 <li>NHWC
645 </ul>
646 <td>
647 <table>
648 <tr><th>src<th>dst
649 <tr><td>All<td>F32
650 </table>
651<tr>
Teresa Charlin62687422021-04-28 10:58:49 +0100652 <td rowspan="2">CropResize
653 <td rowspan="2" style="width:200px;"> Function to perform cropping and resizing.
654 <td rowspan="2">
655 <ul>
656 <li>n/a
657 </ul>
658 <td>NECropResize
659 <td>
660 <ul>
661 <li>NHWC
662 </ul>
663 <td>
664 <table>
665 <tr><th>src0<th>src1<th>src2<th>dst
666 <tr><td>All<td>F32<td>F32<td>F32
667 </table>
668<tr>
669 <td>CLCropResize
670 <td>
671 <ul>
672 <li>NHWC
673 </ul>
674 <td>
675 <table>
676 <tr><th>src0<th>src1<th>src2<th>dst
677 <tr><td>All<td>F32<td>F32<td>F32
678 </table>
679<tr>
680 <td rowspan="2">DeconvolutionLayer
Jakub Sujakee301b32021-06-04 09:46:08 +0100681 <td rowspan="2" style="width:200px;"> Function to compute a deconvolution or transpose convolution.
Teresa Charlin62687422021-04-28 10:58:49 +0100682 <td rowspan="2">
683 <ul>
684 <li>ANEURALNETWORKS_TRANSPOSE_CONV_2D
685 </ul>
686 <td>NEDeconvolutionLayer
687 <td>
688 <ul>
689 <li>NHWC
690 <li>NCHW
691 </ul>
692 <td>
693 <table>
694 <tr><th>src0<th>src1<th>src2<th>dst
695 <tr><td>F16<td>F16<td>F16<td>F16
696 <tr><td>F32<td>F32<td>F32<td>F32
697 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
698 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
699 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
700 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
701 </table>
702<tr>
703 <td>CLDeconvolutionLayer
704 <td>
705 <ul>
706 <li>NHWC
707 <li>NCHW
708 </ul>
709 <td>
710 <table>
711 <tr><th>src0<th>src1<th>src2<th>dst
712 <tr><td>F16<td>F16<td>F16<td>F16
713 <tr><td>F32<td>F32<td>F32<td>F32
714 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
715 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
716 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
717 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
718 </table>
719<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +0100720 <td rowspan="1">DeconvolutionLayerUpsample
721 <td rowspan="1" style="width:200px;"> Function to execute deconvolution upsample on OpenCL.
722 <td rowspan="1">
723 <ul>
724 <li>ANEURALNETWORKS_TRANSPOSE_CONV_2D
725 </ul>
726 <td>CLDeconvolutionLayerUpsample
727 <td>
728 <ul>
729 <li>NHWC
730 <li>NCHW
731 </ul>
732 <td>
733 <table>
734 <tr><th>src<th>dst
735 <tr><td>All<td>All
736 </table>
737<tr>
Teresa Charlin62687422021-04-28 10:58:49 +0100738 <td rowspan="2">DepthConvertLayer
739 <td rowspan="2" style="width:200px;"> Performs a down-scaling depth conversion.
740 <td rowspan="2">
741 <ul>
742 <li>n/a
743 </ul>
744 <td>NEDepthConvertLayer
745 <td>
746 <ul>
747 <li>All
748 </ul>
749 <td>
750 <table>
751 <tr><th>src<th>dst
752 <tr><td>QASYMM8<td>F16, F32
753 <tr><td>U8<td>U16, S16, S32
754 <tr><td>U16<td>U8, U32
755 <tr><td>S16<td>U8, S32
756 <tr><td>BFLOAT16<td>F32
757 <tr><td>F16<td>QASYMM8, F32
758 <tr><td>F32<td>QASYMM8, F16, BFLOAT16
759 </table>
760<tr>
761 <td>CLDepthConvertLayer
762 <td>
763 <ul>
764 <li>All
765 </ul>
766 <td>
767 <table>
768 <tr><th>src<th>dst
769 <tr><td>U8<td>S8, U16, S16, U32, S32, F16, F32
770 <tr><td>U16<td>U8, S8, S16, U32, S32, F16, F32
771 <tr><td>S16<td>U8, S8, U16, U32, S32, F16, F32
772 <tr><td>U32<td>U8, S8, U16, S16, S32, F16, F32
773 <tr><td>S32<td>U8, S8, U16, S16, U32, F16, F32
774 <tr><td>F16<td>U8, S8, U16, S16, U32, F32
775 <tr><td>F32<td>U8, S8, U16, S16, U32, F16
776 </table>
777<tr>
778 <td rowspan="2">DepthToSpaceLayer
779 <td rowspan="2" style="width:200px;"> Depth to Space transformation.
780 <td rowspan="2">
781 <ul>
782 <li>ANEURALNETWORKS_DEPTH_TO_SPACE
783 </ul>
784 <td>NEDepthToSpaceLayer
785 <td>
786 <ul>
787 <li>NHWC
788 <li>NCHW
789 </ul>
790 <td>
791 <table>
792 <tr><th>src<th>dst
793 <tr><td>All<td>All
794 </table>
795<tr>
796 <td>CLDepthToSpaceLayer
797 <td>
798 <ul>
799 <li>NHWC
800 <li>NCHW
801 </ul>
802 <td>
803 <table>
804 <tr><th>src<th>dst
805 <tr><td>All<td>All
806 </table>
807<tr>
808 <td rowspan="2">DepthwiseConvolutionLayer
809 <td rowspan="2" style="width:200px;"> Function to perform depthwise separable convolution.
810 <td rowspan="2">
811 <ul>
812 <li>ANEURALNETWORKS_DEPTHWISE_CONV_2D
813 </ul>
814 <td>NEDepthwiseConvolutionLayer
815 <td>
816 <ul>
817 <li>NHWC
818 <li>NCHW
819 </ul>
820 <td>
821 <table>
822 <tr><th>src0<th>src1<th>src2<th>dst
823 <tr><td>F16<td>F16<td>F16<td>F16
824 <tr><td>F32<td>F32<td>F32<td>F32
825 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
826 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
827 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
828 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
829 </table>
830<tr>
831 <td>CLDepthwiseConvolutionLayer
832 <td>
833 <ul>
834 <li>NHWC
835 <li>NCHW
836 </ul>
837 <td>
838 <table>
839 <tr><th>src0<th>src1<th>src2<th>dst
840 <tr><td>F16<td>F16<td>F16<td>F16
841 <tr><td>F32<td>F32<td>F32<td>F32
842 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
843 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
844 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
845 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
846 </table>
847<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100848 <td rowspan="2">DequantizationLayer
Teresa Charlin62687422021-04-28 10:58:49 +0100849 <td rowspan="2" style="width:200px;"> Function to dequantize the values in a tensor.
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100850 <td rowspan="2">
851 <ul>
852 <li>ANEURALNETWORKS_DEQUANTIZE
853 </ul>
854 <td>NEDequantizationLayer
855 <td>
856 <ul>
857 <li>All
858 </ul>
859 <td>
860 <table>
861 <tr><th>src<th>dst
Teresa Charlin62687422021-04-28 10:58:49 +0100862 <tr><td>QASYMM8<td>F16, F32
863 <tr><td>QASYMM8_SIGNED<td>F16, F32
864 <tr><td>QSYMM8_PER_CHANNEL<td>F16, F32
865 <tr><td>QSYMM8<td>F16, F32
866 <tr><td>QSYMM16<td>F16, F32
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100867 </table>
868<tr>
869 <td>CLDequantizationLayer
870 <td>
871 <ul>
872 <li>All
873 </ul>
874 <td>
875 <table>
876 <tr><th>src<th>dst
Teresa Charlin62687422021-04-28 10:58:49 +0100877 <tr><td>QASYMM8<td>F16, F32
878 <tr><td>QASYMM8_SIGNED<td>F16, F32
879 <tr><td>QSYMM8_PER_CHANNEL<td>F16, F32
880 <tr><td>QSYMM8<td>F16, F32
881 <tr><td>QSYMM16<td>F16, F32
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100882 </table>
883<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +0100884 <td rowspan="1">DetectionPostProcessLayer
885 <td rowspan="1" style="width:200px;"> Function to generate the detection output based on center size encoded boxes, class prediction and anchors by doing non maximum suppression (NMS).
886 <td rowspan="1">
887 <ul>
888 <li>ANEURALNETWORKS_DETECTION_POSTPROCESSING
889 </ul>
890 <td>NEDetectionPostProcessLayer
891 <td>
892 <ul>
893 <li>All
894 </ul>
895 <td>
896 <table>
897 <tr><th>src0 - src2<th>dst0 - dst3
898 <tr><td>QASYMM8<td>F32
899 <tr><td>QASYMM8_SIGNED<td>F32
900 <tr><td>F32<td>F32
901 </table>
902<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100903 <td rowspan="2">DirectConvolutionLayer
Teresa Charlin62687422021-04-28 10:58:49 +0100904 <td rowspan="2" style="width:200px;"> Function to compute direct convolution.
Sheri Zhanga47dcc22021-04-22 14:41:12 +0100905 <td rowspan="2">
906 <ul>
907 <li>ANEURALNETWORKS_CONV_2D
908 </ul>
909 <td>NEDirectConvolutionLayer
910 <td>
911 <ul>
912 <li>NHWC
913 <li>NCHW
914 </ul>
915 <td>
916 <table>
917 <tr><th>src0<th>src1<th>src2<th>dst
918 <tr><td>F16<td>F16<td>F16<td>F16
919 <tr><td>F32<td>F32<td>F32<td>F32
920 </table>
921<tr>
922 <td>CLDirectConvolutionLayer
923 <td>
924 <ul>
925 <li>NHWC
926 <li>NCHW
927 </ul>
928 <td>
929 <table>
930 <tr><th>src0<th>src1<th>src2<th>dst
931 <tr><td>F16<td>F16<td>F16<td>F16
932 <tr><td>F32<td>F32<td>F32<td>F32
933 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
934 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
935 </table>
936<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +0100937 <td rowspan="1">DirectDeconvolutionLayer
938 <td rowspan="1" style="width:200px;"> Function to run the deconvolution layer.
939 <td rowspan="1">
940 <ul>
941 <li>ANEURALNETWORKS_TRANSPOSE_CONV_2D
942 </ul>
943 <td>CLDirectDeconvolutionLayer
944 <td>
945 <ul>
946 <li>NHWC
947 <li>NCHW
948 </ul>
949 <td>
950 <table>
951 <tr><th>src0<th>src1<th>src2<th>dst
952 <tr><td>F16<td>F16<td>F16<td>F16
953 <tr><td>F32<td>F32<td>F32<td>F32
954 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
955 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
956 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
957 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
958 </table>
959<tr>
Jakub Sujakee301b32021-06-04 09:46:08 +0100960 <td rowspan="13">ElementwiseOperations
Sheri Zhang6124ce62021-05-04 14:03:13 +0100961 <td rowspan="13" style="width:200px;"> Function to perform in Cpu: - Div - Max - Min - Pow - SquaredDiff - Comparisons (Equal, greater, greater_equal, less, less_equal, not_equal) Function to perform in CL: - Add - Sub - Div - Max - Min - Pow - SquaredDiff
962 <td rowspan="13">
963 <ul>
964 <li>ANEURALNETWORKS_MAXIMUM
965 <li>ANEURALNETWORKS_MINIMUM
966 <li>ANEURALNETWORKS_POW
967 <li>ANEURALNETWORKS_DIV
968 <li>ANEURALNETWORKS_ADD
969 <li>ANEURALNETWORKS_SUB
970 <li>ANEURALNETWORKS_EQUAL
971 <li>ANEURALNETWORKS_GREATER
972 <li>ANEURALNETWORKS_GREATER_EQUAL
973 <li>ANEURALNETWORKS_LESS
974 <li>ANEURALNETWORKS_LESS_EQUAL
975 <li>ANEURALNETWORKS_NOT_EQUAL
976 </ul>
977 <td>NEElementwiseMax
978 <td>
979 <ul>
980 <li>All
981 </ul>
982 <td>
983 <table>
984 <tr><th>src0<th>src1<th>dst
985 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
986 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
987 <tr><td>S32<td>S32<td>S32
988 <tr><td>S16<td>S16<td>S16
989 <tr><td>F16<td>F16<td>F16
990 <tr><td>F32<td>F32<td>F32
991 </table>
992<tr>
993 <td>NEElementwiseMin
994 <td>
995 <ul>
996 <li>All
997 </ul>
998 <td>
999 <table>
1000 <tr><th>src0<th>src1<th>dst
1001 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1002 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
1003 <tr><td>S32<td>S32<td>S32
1004 <tr><td>S16<td>S16<td>S16
1005 <tr><td>F16<td>F16<td>F16
1006 <tr><td>F32<td>F32<td>F32
1007 </table>
1008<tr>
1009 <td>NEElementwiseSquaredDiff
1010 <td>
1011 <ul>
1012 <li>All
1013 </ul>
1014 <td>
1015 <table>
1016 <tr><th>src0<th>src1<th>dst
1017 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1018 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
1019 <tr><td>S32<td>S32<td>S32
1020 <tr><td>S16<td>S16<td>S16
1021 <tr><td>F16<td>F16<td>F16
1022 <tr><td>F32<td>F32<td>F32
1023 </table>
1024<tr>
1025 <td>NEElementwiseDivision
1026 <td>
1027 <ul>
1028 <li>All
1029 </ul>
1030 <td>
1031 <table>
1032 <tr><th>src0<th>src1<th>dst
1033 <tr><td>F16<td>F16<td>F16
1034 <tr><td>F32<td>F32<td>F32
1035 </table>
1036<tr>
1037 <td>NEElementwisePower
1038 <td>
1039 <ul>
1040 <li>All
1041 </ul>
1042 <td>
1043 <table>
1044 <tr><th>src0<th>src1<th>dst
1045 <tr><td>F16<td>F16<td>F16
1046 <tr><td>F32<td>F32<td>F32
1047 </table>
1048<tr>
1049 <td>NEElementwiseComparison
1050 <td>
1051 <ul>
1052 <li>All
1053 </ul>
1054 <td>
1055 <table>
1056 <tr><th>src0<th>src1<th>dst
1057 <tr><td>QASYMM8<td>QASYMM8<td>U8
1058 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>U8
1059 <tr><td>S32<td>S32<td>U8
1060 <tr><td>U8<td>U8<td>U8
1061 <tr><td>S16<td>S16<td>U8
1062 <tr><td>F16<td>F16<td>U8
1063 <tr><td>F32<td>F32<td>U8
1064 </table>
1065<tr>
1066 <td>CLArithmeticAddition
1067 <td>
1068 <ul>
1069 <li>All
1070 </ul>
1071 <td>
1072 <table>
1073 <tr><th>src0<th>src1<th>dst
1074 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1075 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
1076 <tr><td>QSYMM16<td>QSYMM16<td>QSYMM16
1077 <tr><td>U8<td>U8<td>U8
1078 <tr><td>U8<td>U8<td>S16
1079 <tr><td>U8<td>S16<td>S16
1080 <tr><td>S16<td>U8<td>S16
1081 <tr><td>S16<td>S16<td>S16
1082 <tr><td>S32<td>S32<td>S32
1083 <tr><td>F16<td>F16<td>F16
1084 <tr><td>F32<td>F32<td>F32
1085 </table>
1086<tr>
1087 <td>CLArithmeticSubtraction
1088 <td>
1089 <ul>
1090 <li>All
1091 </ul>
1092 <td>
1093 <table>
1094 <tr><th>src0<th>src1<th>dst
1095 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1096 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
1097 <tr><td>QSYMM16<td>QSYMM16<td>QSYMM16
1098 <tr><td>U8<td>U8<td>U8
1099 <tr><td>U8<td>U8<td>S16
1100 <tr><td>U8<td>S16<td>S16
1101 <tr><td>S16<td>U8<td>S16
1102 <tr><td>S16<td>S16<td>S16
1103 <tr><td>S32<td>S32<td>S32
1104 <tr><td>F16<td>F16<td>F16
1105 <tr><td>F32<td>F32<td>F32
1106 </table>
1107<tr>
1108 <td>CLArithmeticDivision
1109 <td>
1110 <ul>
1111 <li>All
1112 </ul>
1113 <td>
1114 <table>
1115 <tr><th>src0<th>src1<th>dst
1116 <tr><td>F16<td>F16<td>F16
1117 <tr><td>F32<td>F32<td>F32
1118 </table>
1119<tr>
1120 <td>CLElementwiseMax
1121 <td>
1122 <ul>
1123 <li>All
1124 </ul>
1125 <td>
1126 <table>
1127 <tr><th>src0<th>src1<th>dst
1128 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1129 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
1130 <tr><td>QSYMM16<td>QSYMM16<td>QSYMM16
1131 <tr><td>U8<td>U8<td>U8
1132 <tr><td>S16<td>S16<td>S16
1133 <tr><td>S32<td>S32<td>S32
1134 <tr><td>U32<td>U32<td>U32
1135 <tr><td>F16<td>F16<td>F16
1136 <tr><td>F32<td>F32<td>F32
1137 </table>
1138<tr>
1139 <td>CLElementwiseMin
1140 <td>
1141 <ul>
1142 <li>All
1143 </ul>
1144 <td>
1145 <table>
1146 <tr><th>src0<th>src1<th>dst
1147 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1148 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
1149 <tr><td>QSYMM16<td>QSYMM16<td>QSYMM16
1150 <tr><td>U8<td>U8<td>U8
1151 <tr><td>S16<td>S16<td>S16
1152 <tr><td>S32<td>S32<td>S32
1153 <tr><td>U32<td>U32<td>U32
1154 <tr><td>F16<td>F16<td>F16
1155 <tr><td>F32<td>F32<td>F32
1156 </table>
1157<tr>
1158 <td>CLElementwiseSquaredDiff
1159 <td>
1160 <ul>
1161 <li>All
1162 </ul>
1163 <td>
1164 <table>
1165 <tr><th>src0<th>src1<th>dst
1166 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
1167 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
1168 <tr><td>QSYMM16<td>QSYMM16<td>QSYMM16
1169 <tr><td>U8<td>U8<td>U8
1170 <tr><td>S16<td>S16<td>S16
1171 <tr><td>F16<td>F16<td>F16
1172 <tr><td>F32<td>F32<td>F32
1173 </table>
1174<tr>
1175 <td>CLElementwisePower
1176 <td>
1177 <ul>
1178 <li>All
1179 </ul>
1180 <td>
1181 <table>
1182 <tr><th>src0<th>src1<th>dst
1183 <tr><td>F16<td>F16<td>F16
1184 <tr><td>F32<td>F32<td>F32
1185 </table>
1186<tr>
1187 <td rowspan="8">ElementwiseUnaryLayer
1188 <td rowspan="8" style="width:200px;"> Function to perform: - Rsqrt - Exp - Neg - Log - Abs - Round - Sin
1189 <td rowspan="8">
1190 <ul>
1191 <li>ANEURALNETWORKS_ABS
1192 <li>ANEURALNETWORKS_EXP
1193 <li>ANEURALNETWORKS_LOG
1194 <li>ANEURALNETWORKS_NEG
1195 <li>ANEURALNETWORKS_RSQRT
1196 <li>ANEURALNETWORKS_SIN
1197 </ul>
1198 <td>NEElementwiseUnaryLayer
1199 <td>
1200 <ul>
1201 <li>All
1202 </ul>
1203 <td>
1204 <table>
1205 <tr><th>src<th>dst
1206 <tr><td>F16<td>F16
1207 <tr><td>F32<td>F32
1208 <tr><td>S32<td>S32
1209 </table>
1210<tr>
1211 <td>CLRsqrtLayer
1212 <td>
1213 <ul>
1214 <li>All
1215 </ul>
1216 <td>
1217 <table>
1218 <tr><th>src<th>dst
1219 <tr><td>F16<td>F16
1220 <tr><td>F32<td>F32
1221 </table>
1222<tr>
1223 <td>CLExpLayer
1224 <td>
1225 <ul>
1226 <li>All
1227 </ul>
1228 <td>
1229 <table>
1230 <tr><th>src<th>dst
1231 <tr><td>F16<td>F16
1232 <tr><td>F32<td>F32
1233 </table>
1234<tr>
1235 <td>CLNegLayer
1236 <td>
1237 <ul>
1238 <li>All
1239 </ul>
1240 <td>
1241 <table>
1242 <tr><th>src<th>dst
1243 <tr><td>F16<td>F16
1244 <tr><td>F32<td>F32
Jakub Sujakee301b32021-06-04 09:46:08 +01001245 <tr><td>S32<td>S32
Sheri Zhang6124ce62021-05-04 14:03:13 +01001246 </table>
1247<tr>
1248 <td>CLSinLayer
1249 <td>
1250 <ul>
1251 <li>All
1252 </ul>
1253 <td>
1254 <table>
1255 <tr><th>src<th>dst
1256 <tr><td>F16<td>F16
1257 <tr><td>F32<td>F32
1258 </table>
1259<tr>
1260 <td>CLLogLayer
1261 <td>
1262 <ul>
1263 <li>All
1264 </ul>
1265 <td>
1266 <table>
1267 <tr><th>src<th>dst
1268 <tr><td>F16<td>F16
1269 <tr><td>F32<td>F32
1270 </table>
1271<tr>
1272 <td>CLAbsLayer
1273 <td>
1274 <ul>
1275 <li>All
1276 </ul>
1277 <td>
1278 <table>
1279 <tr><th>src<th>dst
1280 <tr><td>F16<td>F16
1281 <tr><td>F32<td>F32
1282 </table>
1283<tr>
1284 <td>CLRoundLayer
1285 <td>
1286 <ul>
1287 <li>All
1288 </ul>
1289 <td>
1290 <table>
1291 <tr><th>src<th>dst
1292 <tr><td>F16<td>F16
1293 <tr><td>F32<td>F32
1294 </table>
1295<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001296 <td rowspan="2">FFT1D
Teresa Charlin62687422021-04-28 10:58:49 +01001297 <td rowspan="2" style="width:200px;"> Fast Fourier Transform 1D.
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001298 <td rowspan="2">
1299 <ul>
Teresa Charlin62687422021-04-28 10:58:49 +01001300 <li>n/a
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001301 </ul>
1302 <td>NEFFT1D
1303 <td>
1304 <ul>
1305 <li>All
1306 </ul>
1307 <td>
1308 <table>
1309 <tr><th>src<th>dst
1310 <tr><td>F32<td>F32
1311 </table>
1312<tr>
1313 <td>CLFFT1D
1314 <td>
1315 <ul>
1316 <li>All
1317 </ul>
1318 <td>
1319 <table>
1320 <tr><th>src<th>dst
1321 <tr><td>F32<td>F32
1322 <tr><td>F16<td>F16
1323 </table>
1324<tr>
1325 <td rowspan="2">FFT2D
Teresa Charlin62687422021-04-28 10:58:49 +01001326 <td rowspan="2" style="width:200px;"> Fast Fourier Transform 2D.
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001327 <td rowspan="2">
1328 <ul>
Teresa Charlin62687422021-04-28 10:58:49 +01001329 <li>n/a
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001330 </ul>
1331 <td>NEFFT2D
1332 <td>
1333 <ul>
1334 <li>All
1335 </ul>
1336 <td>
1337 <table>
1338 <tr><th>src<th>dst
1339 <tr><td>F32<td>F32
1340 </table>
1341<tr>
1342 <td>CLFFT2D
1343 <td>
1344 <ul>
1345 <li>All
1346 </ul>
1347 <td>
1348 <table>
1349 <tr><th>src<th>dst
1350 <tr><td>F32<td>F32
1351 <tr><td>F16<td>F16
1352 </table>
1353<tr>
1354 <td rowspan="2">FFTConvolutionLayer
Teresa Charlin62687422021-04-28 10:58:49 +01001355 <td rowspan="2" style="width:200px;"> Fast Fourier Transform Convolution.
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001356 <td rowspan="2">
1357 <ul>
1358 <li>ANEURALNETWORKS_CONV_2D
1359 </ul>
1360 <td>NEFFTConvolutionLayer
1361 <td>
1362 <ul>
1363 <li>All
1364 </ul>
1365 <td>
1366 <table>
1367 <tr><th>src<th>dst
1368 <tr><td>F32<td>F32
1369 </table>
1370<tr>
1371 <td>CLFFTConvolutionLayer
1372 <td>
1373 <ul>
1374 <li>All
1375 </ul>
1376 <td>
1377 <table>
1378 <tr><th>src<th>dst
1379 <tr><td>F32<td>F32
1380 <tr><td>F16<td>F16
1381 </table>
1382<tr>
1383 <td rowspan="2">Fill
Teresa Charlin62687422021-04-28 10:58:49 +01001384 <td rowspan="2" style="width:200px;"> Set the values of a tensor with a given value.
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001385 <td rowspan="2">
1386 <ul>
1387 <li>ANEURALNETWORKS_FILL
1388 </ul>
1389 <td>NEFill
1390 <td>
1391 <ul>
1392 <li>All
1393 </ul>
1394 <td>
1395 <table>
1396 <tr><th>src<th>dst
1397 <tr><td>All<td>All
1398 </table>
1399<tr>
1400 <td>CLFill
1401 <td>
1402 <ul>
1403 <li>All
1404 </ul>
1405 <td>
1406 <table>
1407 <tr><th>src<th>dst
1408 <tr><td>All<td>All
1409 </table>
1410<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01001411 <td rowspan="2">FillBorder
Jakub Sujakee301b32021-06-04 09:46:08 +01001412 <td rowspan="2" style="width:200px;"> Function to fill the borders within the XY-planes.
Teresa Charlin62687422021-04-28 10:58:49 +01001413 <td rowspan="2">
1414 <ul>
1415 <li>n/a
1416 </ul>
1417 <td>NEFillBorder
1418 <td>
1419 <ul>
1420 <li>All
1421 </ul>
1422 <td>
1423 <table>
1424 <tr><th>src<th>dst
1425 <tr><td>All<td>All
1426 </table>
1427<tr>
1428 <td>CLFillBorder
1429 <td>
1430 <ul>
1431 <li>All
1432 </ul>
1433 <td>
1434 <table>
1435 <tr><th>src<th>dst
1436 <tr><td>All<td>All
1437 </table>
1438<tr>
1439 <td rowspan="2">FlattenLayer
1440 <td rowspan="2" style="width:200px;"> Reshape a tensor to be 1D.
1441 <td rowspan="2">
1442 <ul>
1443 <li>ANEURALNETWORKS_RESHAPE
1444 </ul>
1445 <td>NEFlattenLayer
1446 <td>
1447 <ul>
1448 <li>All
1449 </ul>
1450 <td>
1451 <table>
1452 <tr><th>src<th>dst
1453 <tr><td>All<td>All
1454 </table>
1455<tr>
1456 <td>CLFlattenLayer
1457 <td>
1458 <ul>
1459 <li>All
1460 </ul>
1461 <td>
1462 <table>
1463 <tr><th>src<th>dst
1464 <tr><td>All<td>All
1465 </table>
1466<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001467 <td rowspan="2">Floor
Teresa Charlin62687422021-04-28 10:58:49 +01001468 <td rowspan="2" style="width:200px;"> Round the value to the lowest number.
Sheri Zhanga47dcc22021-04-22 14:41:12 +01001469 <td rowspan="2">
1470 <ul>
1471 <li>ANEURALNETWORKS_FLOOR
1472 </ul>
1473 <td>NEFloor
1474 <td>
1475 <ul>
1476 <li>All
1477 </ul>
1478 <td>
1479 <table>
1480 <tr><th>src<th>dst
1481 <tr><td>F32<td>F32
1482 <tr><td>F16<td>F16
1483 </table>
1484<tr>
1485 <td>CLFloor
1486 <td>
1487 <ul>
1488 <li>All
1489 </ul>
1490 <td>
1491 <table>
1492 <tr><th>src<th>dst
1493 <tr><td>F32<td>F32
1494 <tr><td>F16<td>F16
1495 </table>
1496<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01001497 <td rowspan="2">FullyConnectedLayer
1498 <td rowspan="2" style="width:200px;"> Function to perform a fully connected / dense layer.
1499 <td rowspan="2">
1500 <ul>
1501 <li>ANEURALNETWORKS_FULLY_CONNECTED
1502 </ul>
Sheri Zhang6124ce62021-05-04 14:03:13 +01001503 <td>NEFullyConnectedLayer
Teresa Charlin62687422021-04-28 10:58:49 +01001504 <td>
1505 <ul>
1506 <li>NHWC
1507 <li>NCHW
1508 </ul>
1509 <td>
1510 <table>
1511 <tr><th>src0<th>src1<th>src2<th>dst
1512 <tr><td>F16<td>F16<td>F16<td>F16
1513 <tr><td>F32<td>F32<td>F32<td>F32
1514 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1515 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1516 </table>
1517<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +01001518 <td>CLFullyConnectedLayer
Teresa Charlin62687422021-04-28 10:58:49 +01001519 <td>
1520 <ul>
1521 <li>NHWC
1522 <li>NCHW
1523 </ul>
1524 <td>
1525 <table>
1526 <tr><th>src0<th>src1<th>src2<th>dst
1527 <tr><td>F16<td>F16<td>F16<td>F16
1528 <tr><td>F32<td>F32<td>F32<td>F32
1529 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1530 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1531 </table>
1532<tr>
1533 <td rowspan="2">FuseBatchNormalization
1534 <td rowspan="2" style="width:200px;"> Function to fuse the batch normalization node to a preceding convolution node.
1535 <td rowspan="2">
1536 <ul>
1537 <li>n/a
1538 </ul>
1539 <td>NEFuseBatchNormalization
1540 <td>
1541 <ul>
1542 <li>NHWC
1543 <li>NCHW
1544 </ul>
1545 <td>
1546 <table>
1547 <tr><th>src<th>dst
1548 <tr><td>F32<td>F32
1549 <tr><td>F16<td>F16
1550 </table>
1551<tr>
1552 <td>CLFuseBatchNormalization
1553 <td>
1554 <ul>
1555 <li>NHWC
1556 <li>NCHW
1557 </ul>
1558 <td>
1559 <table>
1560 <tr><th>src<th>dst
1561 <tr><td>F32<td>F32
1562 <tr><td>F16<td>F16
1563 </table>
1564<tr>
1565 <td rowspan="2">Gather
1566 <td rowspan="2" style="width:200px;"> Performs the Gather operation along the chosen axis.
1567 <td rowspan="2">
1568 <ul>
1569 <li>ANEURALNETWORKS_GATHER
1570 </ul>
1571 <td>NEGather
1572 <td>
1573 <ul>
1574 <li>All
1575 </ul>
1576 <td>
1577 <table>
1578 <tr><th>src<th>dst
1579 <tr><td>All<td>All
1580 </table>
1581<tr>
1582 <td>CLGather
1583 <td>
1584 <ul>
1585 <li>All
1586 </ul>
1587 <td>
1588 <table>
1589 <tr><th>src<th>dst
1590 <tr><td>All<td>All
1591 </table>
1592<tr>
1593 <td rowspan="2">GEMM
1594 <td rowspan="2" style="width:200px;"> General Matrix Multiplication.
1595 <td rowspan="2">
1596 <ul>
1597 <li>n/a
1598 </ul>
1599 <td>NEGEMM
1600 <td>
1601 <ul>
1602 <li>All
1603 </ul>
1604 <td>
1605 <table>
1606 <tr><th>src0<th>src1<th>src2<th>dst
1607 <tr><td>F32<td>F32<td>F32<td>F32
1608 <tr><td>F16<td>F16<td>F16<td>F16
1609 <tr><td>BFLOAT16<td>BFLOAT16<td>BFLOAT16<td>BFLOAT16
1610 </table>
1611<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +01001612 <td>CLGEMM
Teresa Charlin62687422021-04-28 10:58:49 +01001613 <td>
1614 <ul>
1615 <li>All
1616 </ul>
1617 <td>
1618 <table>
1619 <tr><th>src0<th>src1<th>src2<th>dst
1620 <tr><td>F32<td>F32<td>F32<td>F32
1621 <tr><td>F16<td>F16<td>F16<td>F16
1622 </table>
1623<tr>
Jakub Sujakee301b32021-06-04 09:46:08 +01001624 <td rowspan="1">GEMMConv2d
Sheri Zhang6124ce62021-05-04 14:03:13 +01001625 <td rowspan="1" style="width:200px;"> General Matrix Multiplication.
1626 <td rowspan="1">
1627 <ul>
1628 <li>ANEURALNETWORKS_CONV_2D
1629 </ul>
1630 <td>NEGEMMConv2d
1631 <td>
1632 <ul>
1633 <li>All
1634 </ul>
1635 <td>
1636 <table>
1637 <tr><th>src0<th>src1<th>src2<th>dst
1638 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1639 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1640 <tr><td>F16<td>F16<td>F16<td>F16
1641 <tr><td>F32<td>F32<td>F32<td>F32
1642 <tr><td>BFLOAT16<td>BFLOAT16<td>BFLOAT16<td>BFLOAT16
1643 </table>
1644<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01001645 <td rowspan="2">GEMMConvolutionLayer
1646 <td rowspan="2" style="width:200px;"> General Matrix Multiplication.
1647 <td rowspan="2">
1648 <ul>
1649 <li>ANEURALNETWORKS_CONV_2D
1650 </ul>
Sheri Zhang6124ce62021-05-04 14:03:13 +01001651 <td>NEGEMMConvolutionLayer
Teresa Charlin62687422021-04-28 10:58:49 +01001652 <td>
1653 <ul>
1654 <li>NHWC
1655 <li>NCHW
1656 </ul>
1657 <td>
1658 <table>
1659 <tr><th>src0<th>src1<th>src2<th>dst
1660 <tr><td>F16<td>F16<td>F16<td>F16
1661 <tr><td>F32<td>F32<td>F32<td>F32
1662 <tr><td>BFLOAT16<td>BFLOAT16<td>BFLOAT16<td>BFLOAT16
1663 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1664 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
1665 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1666 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
1667 </table>
1668<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +01001669 <td>CLGEMMConvolutionLayer
Teresa Charlin62687422021-04-28 10:58:49 +01001670 <td>
1671 <ul>
1672 <li>NHWC
1673 <li>NCHW
1674 </ul>
1675 <td>
1676 <table>
1677 <tr><th>src0<th>src1<th>src2<th>dst
1678 <tr><td>F16<td>F16<td>F16<td>F16
1679 <tr><td>F32<td>F32<td>F32<td>F32
1680 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1681 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
1682 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1683 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
1684 </table>
1685<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +01001686 <td rowspan="1">GEMMDeconvolutionLayer
1687 <td rowspan="1" style="width:200px;"> General Matrix Multiplication.
1688 <td rowspan="1">
1689 <ul>
1690 <li>ANEURALNETWORKS_TRANSPOSE_CONV_2D
1691 </ul>
1692 <td>CLGEMMDeconvolutionLayer
1693 <td>
1694 <ul>
1695 <li>NHWC
1696 </ul>
1697 <td>
1698 <table>
1699 <tr><th>src0<th>src1<th>src2<th>dst
1700 <tr><td>F16<td>F16<td>F16<td>F16
1701 <tr><td>F32<td>F32<td>F32<td>F32
1702 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1703 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1704 </table>
1705<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01001706 <td rowspan="2">GEMMLowpMatrixMultiplyCore
1707 <td rowspan="2" style="width:200px;"> General Matrix Multiplication.
1708 <td rowspan="2">
1709 <ul>
1710 <li>n/a
1711 </ul>
1712 <td>NEGEMMLowpMatrixMultiplyCore
1713 <td>
1714 <ul>
1715 <li>NHWC
1716 <li>NCHW
1717 </ul>
1718 <td>
1719 <table>
1720 <tr><th>src0<th>src1<th>src2<th>dst
1721 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1722 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
1723 <tr><td>QASYMM8<td>QSYMM8<td>S32<td>QASYMM8
1724 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>S32
1725 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>S32
1726 <tr><td>QASYMM8<td>QSYMM8<td>S32<td>S32
1727 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1728 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
1729 <tr><td>QASYMM8_SIGNED<td>QSYMM8<td>S32<td>QASYMM8_SIGNED
1730 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>S32
1731 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>S32
1732 <tr><td>QASYMM8_SIGNED<td>QSYMM8<td>S32<td>S32
1733 </table>
1734<tr>
1735 <td>CLGEMMLowpMatrixMultiplyCore
1736 <td>
1737 <ul>
1738 <li>NHWC
1739 <li>NCHW
1740 </ul>
1741 <td>
1742 <table>
1743 <tr><th>src0<th>src1<th>src2<th>dst
1744 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>QASYMM8
1745 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8
1746 <tr><td>QASYMM8<td>QSYMM8<td>S32<td>QASYMM8
1747 <tr><td>QASYMM8<td>QASYMM8<td>S32<td>S32
1748 <tr><td>QASYMM8<td>QSYMM8_PER_CHANNEL<td>S32<td>S32
1749 <tr><td>QASYMM8<td>QSYMM8<td>S32<td>S32
1750 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>QASYMM8_SIGNED
1751 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>QASYMM8_SIGNED
1752 <tr><td>QASYMM8_SIGNED<td>QSYMM8<td>S32<td>QASYMM8_SIGNED
1753 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>S32<td>S32
1754 <tr><td>QASYMM8_SIGNED<td>QSYMM8_PER_CHANNEL<td>S32<td>S32
1755 <tr><td>QASYMM8_SIGNED<td>QSYMM8<td>S32<td>S32
1756 </table>
1757<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +01001758 <td rowspan="2">GEMMLowpOutputStage
1759 <td rowspan="2" style="width:200px;"> General Matrix Multiplication.
1760 <td rowspan="2">
1761 <ul>
1762 <li>n/a
1763 </ul>
1764 <td>NEGEMMLowpOutputStage
1765 <td>
1766 <ul>
1767 <li>All
1768 </ul>
1769 <td>
1770 <table>
1771 <tr><th>src0<th>src1<th>dst
1772 <tr><td>S32<td>S32<td>QASYMM8
1773 <tr><td>S32<td>S32<td>QASYMM8_SIGNED
1774 <tr><td>S32<td>S32<td>QSYMM16
1775 </table>
1776<tr>
1777 <td>CLGEMMLowpOutputStage
1778 <td>
1779 <ul>
1780 <li>All
1781 </ul>
1782 <td>
1783 <table>
1784 <tr><th>src0<th>src1<th>dst
1785 <tr><td>S32<td>S32<td>QASYMM8
1786 <tr><td>S32<td>S32<td>QASYMM8_SIGNED
1787 <tr><td>S32<td>S32<td>QSYMM16
1788 </table>
1789<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01001790 <td rowspan="2">GenerateProposalsLayer
1791 <td rowspan="2" style="width:200px;"> Function to generate proposals for a RPN (Region Proposal Network).
1792 <td rowspan="2">
1793 <ul>
1794 <li>ANEURALNETWORKS_GENERATE_PROPOSALS
1795 </ul>
1796 <td>NEGenerateProposalsLayer
1797 <td>
1798 <ul>
1799 <li>All
1800 </ul>
1801 <td>
1802 <table>
1803 <tr><th>src0<th>src1<th>src2<th>dst
1804 <tr><td>F16<td>F16<td>F16<td>F16
1805 <tr><td>F32<td>F32<td>F32<td>F32
1806 <tr><td>QASYMM8<td>QSYMM8<td>QSYMM16<td>QASYMM8
1807 </table>
1808<tr>
1809 <td>CLGenerateProposalsLayer
1810 <td>
1811 <ul>
1812 <li>All
1813 </ul>
1814 <td>
1815 <table>
1816 <tr><th>src0<th>src1<th>src2<th>dst
1817 <tr><td>F16<td>F16<td>F16<td>F16
1818 <tr><td>F32<td>F32<td>F32<td>F32
1819 <tr><td>QASYMM8<td>QSYMM8<td>QSYMM16<td>QASYMM8
1820 </table>
1821<tr>
1822 <td rowspan="2">InstanceNormalizationLayer
1823 <td rowspan="2" style="width:200px;"> Function to perform an instance normalization on a given axis.
1824 <td rowspan="2">
1825 <ul>
1826 <li>ANEURALNETWORKS_INSTANCE_NORMALIZATION
1827 </ul>
1828 <td>NEInstanceNormalizationLayer
1829 <td>
1830 <ul>
1831 <li>NHWC
1832 <li>NCHW
1833 </ul>
1834 <td>
1835 <table>
1836 <tr><th>src<th>dst
1837 <tr><td>F16<td>F16
1838 <tr><td>F32<td>F32
1839 </table>
1840<tr>
1841 <td>CLInstanceNormalizationLayer
1842 <td>
1843 <ul>
1844 <li>NHWC
1845 <li>NCHW
1846 </ul>
1847 <td>
1848 <table>
1849 <tr><th>src<th>dst
1850 <tr><td>F16<td>F16
1851 <tr><td>F32<td>F32
1852 </table>
1853<tr>
1854 <td rowspan="2">L2NormalizeLayer
1855 <td rowspan="2" style="width:200px;"> Function to perform a L2 normalization on a given axis.
1856 <td rowspan="2">
1857 <ul>
1858 <li>ANEURALNETWORKS_L2_NORMALIZATION
1859 </ul>
1860 <td>NEL2NormalizeLayer
1861 <td>
1862 <ul>
1863 <li>NHWC
1864 <li>NCHW
1865 </ul>
1866 <td>
1867 <table>
1868 <tr><th>src<th>dst
1869 <tr><td>F16<td>F16
1870 <tr><td>F32<td>F32
1871 </table>
1872<tr>
1873 <td>CLL2NormalizeLayer
1874 <td>
1875 <ul>
1876 <li>NHWC
1877 <li>NCHW
1878 </ul>
1879 <td>
1880 <table>
1881 <tr><th>src<th>dst
1882 <tr><td>F16<td>F16
1883 <tr><td>F32<td>F32
1884 </table>
1885<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +01001886 <td rowspan="3">Logical
1887 <td rowspan="3" style="width:200px;"> Function to perform: - Logical AND - Logical OR - Logical NOT
1888 <td rowspan="3">
1889 <ul>
1890 <li>n/a
1891 </ul>
1892 <td>NELogicalAnd
1893 <td>
1894 <ul>
1895 <li>All
1896 </ul>
1897 <td>
1898 <table>
1899 <tr><th>src0<th>src1<th>dst
1900 <tr><td>U8<td>U8<td>U8
1901 </table>
1902<tr>
1903 <td>NELogicalOr
1904 <td>
1905 <ul>
1906 <li>All
1907 </ul>
1908 <td>
1909 <table>
1910 <tr><th>src0<th>src1<th>dst
1911 <tr><td>U8<td>U8<td>U8
1912 </table>
1913<tr>
1914 <td>NELogicalNot
1915 <td>
1916 <ul>
1917 <li>All
1918 </ul>
1919 <td>
1920 <table>
1921 <tr><th>src<th>dst
1922 <tr><td>U8<td>U8
1923 </table>
1924<tr>
1925 <td rowspan="1">LogicalAnd
1926 <td rowspan="1" style="width:200px;"> Function to perform Logical AND.
1927 <td rowspan="1">
1928 <ul>
1929 <li>n/a
1930 </ul>
1931 <td>CLLogicalAnd
1932 <td>
1933 <ul>
1934 <li>All
1935 </ul>
1936 <td>
1937 <table>
1938 <tr><th>src0<th>src1<th>dst
1939 <tr><td>U8<td>U8<td>U8
1940 </table>
1941<tr>
1942 <td rowspan="1">LogicalOr
1943 <td rowspan="1" style="width:200px;"> Function to perform Logical OR.
1944 <td rowspan="1">
1945 <ul>
1946 <li>n/a
1947 </ul>
1948 <td>CLLogicalOr
1949 <td>
1950 <ul>
1951 <li>All
1952 </ul>
1953 <td>
1954 <table>
1955 <tr><th>src0<th>src1<th>dst
1956 <tr><td>U8<td>U8<td>U8
1957 </table>
1958<tr>
1959 <td rowspan="1">LogicalNot
1960 <td rowspan="1" style="width:200px;"> Function to perform Logical NOT.
1961 <td rowspan="1">
1962 <ul>
1963 <li>n/a
1964 </ul>
1965 <td>CLLogicalNot
1966 <td>
1967 <ul>
1968 <li>All
1969 </ul>
1970 <td>
1971 <table>
1972 <tr><th>src<th>dst
1973 <tr><td>U8<td>U8
1974 </table>
1975<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01001976 <td rowspan="2">LSTMLayer
1977 <td rowspan="2" style="width:200px;"> Function to perform a single time step in a Long Short-Term Memory (LSTM) layer.
1978 <td rowspan="2">
1979 <ul>
1980 <li>ANEURALNETWORKS_LSTM
1981 </ul>
1982 <td>NELSTMLayer
1983 <td>
1984 <ul>
1985 <li>All
1986 </ul>
1987 <td>
1988 <table>
1989 <tr><th>src0 - src13<th>dst0 - dst3
1990 <tr><td>F16<td>F16
1991 <tr><td>F32<td>F32
1992 </table>
1993<tr>
1994 <td>CLLSTMLayer
1995 <td>
1996 <ul>
1997 <li>All
1998 </ul>
1999 <td>
2000 <table>
2001 <tr><th>src0 - src13<th>dst0 - dst3
2002 <tr><td>F16<td>F16
2003 <tr><td>F32<td>F32
2004 </table>
2005<tr>
2006 <td rowspan="2">LSTMLayerQuantized
2007 <td rowspan="2" style="width:200px;"> Function to perform quantized LSTM (Long Short-Term Memory).
2008 <td rowspan="2">
2009 <ul>
2010 <li>ANEURALNETWORKS_QUANTIZED_LSTM
2011 <li>ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
2012 </ul>
2013 <td>NELSTMLayerQuantized
2014 <td>
2015 <ul>
2016 <li>All
2017 </ul>
2018 <td>
2019 <table>
2020 <tr><th>src0 - src8<th>src9 - src12<th>src13<th>src14<th>dst0<th>dst1
2021 <tr><td>QASYMM8<td>S32<td>QSYMM16<td>QASYMM8<td>QSYMM16<td>QASYMM8
2022 </table>
2023<tr>
2024 <td>CLLSTMLayerQuantized
2025 <td>
2026 <ul>
2027 <li>All
2028 </ul>
2029 <td>
2030 <table>
2031 <tr><th>src0 - src8<th>src9 - src12<th>src13<th>src14<th>dst0<th>dst1
2032 <tr><td>QASYMM8<td>S32<td>QSYMM16<td>QASYMM8<td>QSYMM16<td>QASYMM8
2033 </table>
2034<tr>
2035 <td rowspan="2">MaxUnpoolingLayer
2036 <td rowspan="2" style="width:200px;"> Function to perform MaxUnpooling.
2037 <td rowspan="2">
2038 <ul>
2039 <li>n/a
2040 </ul>
2041 <td>NEMaxUnpoolingLayer
2042 <td>
2043 <ul>
2044 <li>NHWC
2045 <li>NCHW
2046 </ul>
2047 <td>
2048 <table>
2049 <tr><th>src<th>dst
2050 <tr><td>QASYMM8<td>QASYMM8
2051 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2052 <tr><td>F16<td>F16
2053 <tr><td>F32<td>F32
2054 </table>
2055<tr>
2056 <td>CLMaxUnpoolingLayer
2057 <td>
2058 <ul>
2059 <li>NHWC
2060 <li>NCHW
2061 </ul>
2062 <td>
2063 <table>
2064 <tr><th>src<th>dst
2065 <tr><td>QASYMM8<td>QASYMM8
2066 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2067 <tr><td>F16<td>F16
2068 <tr><td>F32<td>F32
2069 </table>
2070<tr>
2071 <td rowspan="2">MeanStdDevNormalizationLayer
2072 <td rowspan="2" style="width:200px;"> Function to execute mean and standard deviation normalization.
2073 <td rowspan="2">
2074 <ul>
2075 <li>n/a
2076 </ul>
2077 <td>NEMeanStdDevNormalizationLayer
2078 <td>
2079 <ul>
2080 <li>NHWC
2081 <li>NCHW
2082 </ul>
2083 <td>
2084 <table>
2085 <tr><th>src<th>dst
2086 <tr><td>F32<td>F32
2087 <tr><td>F16<td>F16
2088 </table>
2089<tr>
2090 <td>CLMeanStdDevNormalizationLayer
2091 <td>
2092 <ul>
2093 <li>NHWC
2094 <li>NCHW
2095 </ul>
2096 <td>
2097 <table>
2098 <tr><th>src<th>dst
2099 <tr><td>F32<td>F32
2100 <tr><td>F16<td>F16
2101 </table>
2102<tr>
2103 <td rowspan="2">NormalizationLayer
2104 <td rowspan="2" style="width:200px;"> Function to compute normalization layer.
2105 <td rowspan="2">
2106 <ul>
2107 <li>ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION
2108 </ul>
2109 <td>NENormalizationLayer
2110 <td>
2111 <ul>
2112 <li>NHWC
2113 <li>NCHW
2114 </ul>
2115 <td>
2116 <table>
2117 <tr><th>src<th>dst
2118 <tr><td>F32<td>F32
2119 <tr><td>F16<td>F16
2120 </table>
2121<tr>
2122 <td>CLNormalizationLayer
2123 <td>
2124 <ul>
2125 <li>NHWC
2126 <li>NCHW
2127 </ul>
2128 <td>
2129 <table>
2130 <tr><th>src<th>dst
2131 <tr><td>F32<td>F32
2132 <tr><td>F16<td>F16
2133 </table>
2134<tr>
2135 <td rowspan="2">PadLayer
2136 <td rowspan="2" style="width:200px;"> Function to pad a tensor.
2137 <td rowspan="2">
2138 <ul>
2139 <li>ANEURALNETWORKS_PAD
2140 <li>ANEURALNETWORKS_PAD_V2
2141 </ul>
2142 <td>NEPadLayer
2143 <td>
2144 <ul>
2145 <li>NHWC
2146 <li>NCHW
2147 </ul>
2148 <td>
2149 <table>
2150 <tr><th>src<th>dst
2151 <tr><td>All<td>All
2152 </table>
2153<tr>
2154 <td>CLPadLayer
2155 <td>
2156 <ul>
2157 <li>NHWC
2158 <li>NCHW
2159 </ul>
2160 <td>
2161 <table>
2162 <tr><th>src<th>dst
2163 <tr><td>All<td>All
2164 </table>
2165<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002166 <td rowspan="2">Permute
2167 <td rowspan="2" style="width:200px;"> Function to transpose an ND tensor.
2168 <td rowspan="2">
2169 <ul>
2170 <li>ANEURALNETWORKS_TRANSPOSE
2171 </ul>
2172 <td>NEPermute
2173 <td>
2174 <ul>
2175 <li>NHWC
2176 <li>NCHW
2177 </ul>
2178 <td>
2179 <table>
2180 <tr><th>src<th>dst
2181 <tr><td>All<td>All
2182 </table>
2183<tr>
2184 <td>CLPermute
2185 <td>
2186 <ul>
2187 <li>NHWC
2188 <li>NCHW
2189 </ul>
2190 <td>
2191 <table>
2192 <tr><th>src<th>dst
2193 <tr><td>All<td>All
2194 </table>
2195<tr>
2196 <td rowspan="2">PixelWiseMultiplication
Jakub Sujakee301b32021-06-04 09:46:08 +01002197 <td rowspan="2" style="width:200px;"> Function to perform a multiplication.
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002198 <td rowspan="2">
2199 <ul>
2200 <li>ANEURALNETWORKS_MUL
2201 </ul>
2202 <td>NEPixelWiseMultiplication
2203 <td>
2204 <ul>
2205 <li>All
2206 </ul>
2207 <td>
2208 <table>
2209 <tr><th>src0<th>src1<th>dst
2210 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
2211 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2212 <tr><td>QSYMM16<td>QSYMM16<td>QSYMM16
2213 <tr><td>QSYMM16<td>QSYMM16<td>S32
2214 <tr><td>U8<td>U8<td>U8
2215 <tr><td>U8<td>U8<td>S16
2216 <tr><td>U8<td>S16<td>S16
2217 <tr><td>S16<td>U8<td>S16
2218 <tr><td>S16<td>S16<td>S16
2219 <tr><td>F16<td>F16<td>F16
2220 <tr><td>F32<td>F32<td>F32
2221 </table>
2222<tr>
2223 <td>CLPixelWiseMultiplication
2224 <td>
2225 <ul>
2226 <li>All
2227 </ul>
2228 <td>
2229 <table>
2230 <tr><th>src0<th>src1<th>dst
2231 <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
2232 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2233 <tr><td>QSYMM16<td>QSYMM16<td>QSYMM16
2234 <tr><td>QSYMM16<td>QSYMM16<td>S32
2235 <tr><td>U8<td>U8<td>U8
2236 <tr><td>U8<td>U8<td>S16
2237 <tr><td>U8<td>S16<td>S16
2238 <tr><td>S16<td>U8<td>S16
2239 <tr><td>S16<td>S16<td>S16
2240 <tr><td>F16<td>F16<td>F16
Jakub Sujakee301b32021-06-04 09:46:08 +01002241 <tr><td>F32<td>F32<td>F32
2242 <tr><td>S32<td>S32<td>S32
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002243 </table>
2244<tr>
2245 <td rowspan="2">PoolingLayer
Jakub Sujakee301b32021-06-04 09:46:08 +01002246 <td rowspan="2" style="width:200px;"> Function to perform pooling with the specified pooling operation.
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002247 <td rowspan="2">
2248 <ul>
2249 <li>ANEURALNETWORKS_AVERAGE_POOL_2D
2250 <li>ANEURALNETWORKS_L2_POOL_2D
2251 <li>ANEURALNETWORKS_MAX_POOL_2D
2252 </ul>
2253 <td>NEPoolingLayer
2254 <td>
2255 <ul>
2256 <li>NHWC
2257 <li>NCHW
2258 </ul>
2259 <td>
2260 <table>
2261 <tr><th>src<th>dst
2262 <tr><td>QASYMM8<td>QASYMM8
2263 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2264 <tr><td>F16<td>F16
2265 <tr><td>F32<td>F32
2266 </table>
2267<tr>
2268 <td>CLPoolingLayer
2269 <td>
2270 <ul>
2271 <li>NHWC
2272 <li>NCHW
2273 </ul>
2274 <td>
2275 <table>
2276 <tr><th>src<th>dst
2277 <tr><td>QASYMM8<td>QASYMM8
2278 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2279 <tr><td>F16<td>F16
2280 <tr><td>F32<td>F32
2281 </table>
2282<tr>
2283 <td rowspan="2">PReluLayer
2284 <td rowspan="2" style="width:200px;"> Function to compute the activation layer with the PRELU activation function.
2285 <td rowspan="2">
2286 <ul>
2287 <li>ANEURALNETWORKS_PRELU
2288 </ul>
2289 <td>NEPReluLayer
2290 <td>
2291 <ul>
2292 <li>All
2293 </ul>
2294 <td>
2295 <table>
2296 <tr><th>src<th>dst
2297 <tr><td>QASYMM8<td>QASYMM8
2298 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2299 <tr><td>F16<td>F16
2300 <tr><td>F32<td>F32
2301 </table>
2302<tr>
2303 <td>CLPReluLayer
2304 <td>
2305 <ul>
2306 <li>All
2307 </ul>
2308 <td>
2309 <table>
2310 <tr><th>src<th>dst
2311 <tr><td>QASYMM8<td>QASYMM8
2312 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2313 <tr><td>F16<td>F16
2314 <tr><td>F32<td>F32
2315 </table>
2316<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01002317 <td rowspan="2">PriorBoxLayer
Sheri Zhang6124ce62021-05-04 14:03:13 +01002318 <td rowspan="2" style="width:200px;"> Function to compute prior boxes and clip.
Teresa Charlin62687422021-04-28 10:58:49 +01002319 <td rowspan="2">
2320 <ul>
2321 <li>n/a
2322 </ul>
2323 <td>NEPriorBoxLayer
2324 <td>
2325 <ul>
2326 <li>NHWC
2327 <li>NCHW
2328 </ul>
2329 <td>
2330 <table>
2331 <tr><th>src0<th>src1<th>dst
2332 <tr><td>F32<td>F32<td>F32
2333 </table>
2334<tr>
2335 <td>CLPriorBoxLayer
2336 <td>
2337 <ul>
2338 <li>NHWC
2339 <li>NCHW
2340 </ul>
2341 <td>
2342 <table>
2343 <tr><th>src0<th>src1<th>dst
2344 <tr><td>F32<td>F32<td>F32
2345 </table>
2346<tr>
2347 <td rowspan="2">QLSTMLayer
2348 <td rowspan="2" style="width:200px;"> Function to perform quantized LSTM (Long Short-Term Memory).
2349 <td rowspan="2">
2350 <ul>
2351 <li>ANEURALNETWORKS_QUANTIZED_LSTM
2352 <li>ANEURALNETWORKS_QUANTIZED_16BIT_LSTM
2353 </ul>
2354 <td>NEQLSTMLayer
2355 <td>
2356 <ul>
2357 <li>All
2358 </ul>
2359 <td>
2360 <table>
2361 <tr><th>src0<th>src1 - src6<th>src7 - src9<th>src10<th>src11<th>dst0<th>dst1 - dst2
2362 <tr><td>QASYMM8_SIGNED<td>QASYMM8<td>S32<td>QSYMM16<td>QASYMM8_SIGNED<td>QSYMM16<td>QASYMM8_SIGNED
2363 </table>
2364<tr>
2365 <td>CLQLSTMLayer
2366 <td>
2367 <ul>
2368 <li>All
2369 </ul>
2370 <td>
2371 <table>
2372 <tr><th>src0<th>src1 - src6<th>src7 - src9<th>src10<th>src11<th>dst0<th>dst1 - dst2
2373 <tr><td>QASYMM8_SIGNED<td>QASYMM8<td>S32<td>QSYMM16<td>QASYMM8_SIGNED<td>QSYMM16<td>QASYMM8_SIGNED
2374 </table>
2375<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002376 <td rowspan="2">QuantizationLayer
2377 <td rowspan="2" style="width:200px;"> Function to perform a quantization layer.
2378 <td rowspan="2">
2379 <ul>
2380 <li>ANEURALNETWORKS_QUANTIZE
2381 </ul>
2382 <td>NEQuantizationLayer
2383 <td>
2384 <ul>
2385 <li>All
2386 </ul>
2387 <td>
2388 <table>
2389 <tr><th>src<th>dst
Teresa Charlin62687422021-04-28 10:58:49 +01002390 <tr><td>QASYMM8<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2391 <tr><td>QASYMM8_SIGNED<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2392 <tr><td>F16<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2393 <tr><td>F32<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002394 </table>
2395<tr>
2396 <td>CLQuantizationLayer
2397 <td>
2398 <ul>
2399 <li>All
2400 </ul>
2401 <td>
2402 <table>
2403 <tr><th>src<th>dst
Teresa Charlin62687422021-04-28 10:58:49 +01002404 <tr><td>QASYMM8<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2405 <tr><td>QASYMM8_SIGNED<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2406 <tr><td>F16<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2407 <tr><td>F32<td>QASYMM8, QASYMM8_SIGNED, QASYMM16
2408 </table>
2409<tr>
2410 <td rowspan="2">Range
2411 <td rowspan="2" style="width:200px;"> Function to generate a sequence of numbers starting from START and extending by increments of 'STEP' up to but not including 'END'.
2412 <td rowspan="2">
2413 <ul>
2414 <li>n/a
2415 </ul>
2416 <td>NERange
2417 <td>
2418 <ul>
2419 <li>All
2420 </ul>
2421 <td>
2422 <table>
2423 <tr><th>dst
2424 <tr><td>U8
2425 <tr><td>S8
2426 <tr><td>U16
2427 <tr><td>S16
2428 <tr><td>U32
2429 <tr><td>S32
2430 <tr><td>F16
2431 <tr><td>F32
2432 </table>
2433<tr>
2434 <td>CLRange
2435 <td>
2436 <ul>
2437 <li>All
2438 </ul>
2439 <td>
2440 <table>
2441 <tr><th>dst
2442 <tr><td>U8
2443 <tr><td>S8
2444 <tr><td>QASYMM8
2445 <tr><td>U16
2446 <tr><td>S16
2447 <tr><td>U32
2448 <tr><td>S32
2449 <tr><td>F16
2450 <tr><td>F32
2451 </table>
2452<tr>
2453 <td rowspan="2">ReduceMean
Jakub Sujakee301b32021-06-04 09:46:08 +01002454 <td rowspan="2" style="width:200px;"> Function to perform reduce mean operation.
Teresa Charlin62687422021-04-28 10:58:49 +01002455 <td rowspan="2">
2456 <ul>
2457 <li>ANEURALNETWORKS_MEAN
2458 </ul>
2459 <td>NEReduceMean
2460 <td>
2461 <ul>
2462 <li>All
2463 </ul>
2464 <td>
2465 <table>
2466 <tr><th>src<th>dst
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002467 <tr><td>QASYMM8<td>QASYMM8
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002468 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
Teresa Charlin62687422021-04-28 10:58:49 +01002469 <tr><td>F16<td>F16
2470 <tr><td>F32<td>F32
2471 </table>
2472<tr>
2473 <td>CLReduceMean
2474 <td>
2475 <ul>
2476 <li>All
2477 </ul>
2478 <td>
2479 <table>
2480 <tr><th>src<th>dst
2481 <tr><td>QASYMM8<td>QASYMM8
2482 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2483 <tr><td>F16<td>F16
2484 <tr><td>F32<td>F32
2485 </table>
2486<tr>
2487 <td rowspan="2">ReductionOperation
Jakub Sujakee301b32021-06-04 09:46:08 +01002488 <td rowspan="2" style="width:200px;"> Function to perform reduce with the following operations - ARG_IDX_MAX: Index of the max value - ARG_IDX_MIN: Index of the min value - MEAN_SUM: Mean of sum - PROD: Product - SUM_SQUARE: Sum of squares - SUM: Sum - MIN: Min - MAX: Max
Teresa Charlin62687422021-04-28 10:58:49 +01002489 <td rowspan="2">
2490 <ul>
2491 <li>ANEURALNETWORKS_REDUCE_ALL
2492 <li>ANEURALNETWORKS_REDUCE_ANY
2493 <li>ANEURALNETWORKS_REDUCE_MAX
2494 <li>ANEURALNETWORKS_REDUCE_MIN
2495 <li>ANEURALNETWORKS_REDUCE_PROD
2496 <li>ANEURALNETWORKS_REDUCE_SUM
2497 </ul>
2498 <td>NEReductionOperation
2499 <td>
2500 <ul>
2501 <li>All
2502 </ul>
2503 <td>
2504 <table>
2505 <tr><th>src<th>dst
2506 <tr><td>QASYMM8<td>QASYMM8
2507 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2508 <tr><td>F16<td>F16
2509 <tr><td>F32<td>F32
2510 <tr><td>S32<td>S32
2511 </table>
2512<tr>
2513 <td>CLReductionOperation
2514 <td>
2515 <ul>
2516 <li>All
2517 </ul>
2518 <td>
2519 <table>
2520 <tr><th>src<th>dst
2521 <tr><td>QASYMM8<td>QASYMM8
2522 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2523 <tr><td>F16<td>F16
2524 <tr><td>F32<td>F32
2525 <tr><td>S32<td>S32
2526 </table>
2527<tr>
2528 <td rowspan="2">ReorgLayer
2529 <td rowspan="2" style="width:200px;"> Performs a reorganization layer of input tensor to the output tensor.
2530 <td rowspan="2">
2531 <ul>
2532 <li>n/a
2533 </ul>
2534 <td>NEReorgLayer
2535 <td>
2536 <ul>
2537 <li>NHWC
2538 <li>NCHW
2539 </ul>
2540 <td>
2541 <table>
2542 <tr><th>src<th>dst
2543 <tr><td>All<td>All
2544 </table>
2545<tr>
2546 <td>CLReorgLayer
2547 <td>
2548 <ul>
2549 <li>NHWC
2550 <li>NCHW
2551 </ul>
2552 <td>
2553 <table>
2554 <tr><th>src<th>dst
2555 <tr><td>All<td>All
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002556 </table>
2557<tr>
2558 <td rowspan="2">ReshapeLayer
Teresa Charlin62687422021-04-28 10:58:49 +01002559 <td rowspan="2" style="width:200px;"> Function to reshape a tensor.
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002560 <td rowspan="2">
2561 <ul>
2562 <li>ANEURALNETWORKS_RESHAPE
2563 <li>ANEURALNETWORKS_SQUEEZE
2564 </ul>
2565 <td>NEReshapeLayer
2566 <td>
2567 <ul>
2568 <li>All
2569 </ul>
2570 <td>
2571 <table>
2572 <tr><th>src<th>dst
2573 <tr><td>All<td>All
2574 </table>
2575<tr>
2576 <td>CLReshapeLayer
2577 <td>
2578 <ul>
2579 <li>All
2580 </ul>
2581 <td>
2582 <table>
2583 <tr><th>src<th>dst
2584 <tr><td>All<td>All
2585 </table>
2586<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01002587 <td rowspan="2">Reverse
2588 <td rowspan="2" style="width:200px;"> Function to reverse tensor according to axis.
2589 <td rowspan="2">
2590 <ul>
2591 <li>n/a
2592 </ul>
2593 <td>NEReverse
2594 <td>
2595 <ul>
2596 <li>All
2597 </ul>
2598 <td>
2599 <table>
2600 <tr><th>src0<th>src1<th>dst
2601 <tr><td>All<td>U32<td>All
2602 </table>
2603<tr>
2604 <td>CLReverse
2605 <td>
2606 <ul>
2607 <li>All
2608 </ul>
2609 <td>
2610 <table>
2611 <tr><th>src0<th>src1<th>dst
2612 <tr><td>All<td>U32<td>All
2613 </table>
2614<tr>
2615 <td rowspan="2">RNNLayer
2616 <td rowspan="2" style="width:200px;"> Function to perform recurrent neural network layer.
2617 <td rowspan="2">
2618 <ul>
2619 <li>ANEURALNETWORKS_RNN
2620 </ul>
2621 <td>NERNNLayer
2622 <td>
2623 <ul>
2624 <li>NHWC
2625 <li>NCHW
2626 </ul>
2627 <td>
2628 <table>
2629 <tr><th>src0<th>src1<th>src2<th>src3<th>dst0<th>dst1
2630 <tr><td>F16<td>F16<td>F16<td>F16<td>F16<td>F16
2631 <tr><td>F32<td>F32<td>F32<td>F32<td>F32<td>F32
2632 </table>
2633<tr>
2634 <td>CLRNNLayer
2635 <td>
2636 <ul>
2637 <li>NHWC
2638 <li>NCHW
2639 </ul>
2640 <td>
2641 <table>
2642 <tr><th>src0<th>src1<th>src2<th>src3<th>dst0<th>dst1
2643 <tr><td>F16<td>F16<td>F16<td>F16<td>F16<td>F16
2644 <tr><td>F32<td>F32<td>F32<td>F32<td>F32<td>F32
2645 </table>
2646<tr>
2647 <td rowspan="2">ROIAlignLayer
2648 <td rowspan="2" style="width:200px;"> Function to perform ROI alignment.
2649 <td rowspan="2">
2650 <ul>
2651 <li>ANEURALNETWORKS_ROI_ALIGN
2652 </ul>
2653 <td>NEROIAlignLayer
2654 <td>
2655 <ul>
2656 <li>All
2657 </ul>
2658 <td>
2659 <table>
2660 <tr><th>src0<th>src1<th>dst
2661 <tr><td>F16<td>F16<td>F16
2662 <tr><td>F32<td>F32<td>F32
2663 <tr><td>QASYMM8<td>QASYMM16<td>QASYMM8
2664 <tr><td>QASYMM8_SIGNED<td>QASYMM16<td>QASYMM8_SIGNED
2665 </table>
2666<tr>
2667 <td>CLROIAlignLayer
2668 <td>
2669 <ul>
2670 <li>All
2671 </ul>
2672 <td>
2673 <table>
2674 <tr><th>src0<th>src1<th>dst
2675 <tr><td>F16<td>F16<td>F16
2676 <tr><td>F32<td>F32<td>F32
2677 <tr><td>QASYMM8<td>QASYMM16<td>QASYMM8
2678 <tr><td>QASYMM8_SIGNED<td>QASYMM16<td>QASYMM8_SIGNED
2679 </table>
2680<tr>
2681 <td rowspan="2">ROIPoolingLayer
2682 <td rowspan="2" style="width:200px;"> Function to perform ROI pooling.
2683 <td rowspan="2">
2684 <ul>
2685 <li>ANEURALNETWORKS_ROI_POOLING
2686 </ul>
2687 <td>NEROIPoolingLayer
2688 <td>
2689 <ul>
2690 <li>All
2691 </ul>
2692 <td>
2693 <table>
2694 <tr><th>src0<th>src1<th>dst
2695 <tr><td>F32<td>U16<td>F32
2696 <tr><td>QASYMM8<td>U16<td>QASYMM8
2697 </table>
2698<tr>
2699 <td>CLROIPoolingLayer
2700 <td>
2701 <ul>
2702 <li>All
2703 </ul>
2704 <td>
2705 <table>
2706 <tr><th>src0<th>src1<th>dst
2707 <tr><td>F16<td>U16<td>F16
2708 <tr><td>F32<td>U16<td>F32
2709 <tr><td>QASYMM8<td>U16<td>QASYMM8
2710 </table>
2711<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002712 <td rowspan="2">Scale
Teresa Charlin62687422021-04-28 10:58:49 +01002713 <td rowspan="2" style="width:200px;"> Function to resize a tensor using the following interpolation methods: - Bilinear - Nearest neighbor
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002714 <td rowspan="2">
2715 <ul>
2716 <li>ANEURALNETWORKS_RESIZE_BILINEAR
2717 <li>ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR
2718 </ul>
2719 <td>NEScale
2720 <td>
2721 <ul>
2722 <li>NHWC
2723 <li>NCHW
2724 </ul>
2725 <td>
2726 <table>
2727 <tr><th>src<th>dst
2728 <tr><td>QASYMM8<td>QASYMM8
2729 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2730 <tr><td>F16<td>F16
2731 <tr><td>F32<td>F32
2732 <tr><td>U8<td>U8
2733 <tr><td>S16<td>S16
2734 </table>
2735<tr>
2736 <td>CLScale
2737 <td>
2738 <ul>
2739 <li>NHWC
2740 <li>NCHW
2741 </ul>
2742 <td>
2743 <table>
2744 <tr><th>src<th>dst
2745 <tr><td>QASYMM8<td>QASYMM8
2746 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2747 <tr><td>F16<td>F16
2748 <tr><td>F32<td>F32
2749 <tr><td>U8<td>U8
2750 <tr><td>S16<td>S16
2751 </table>
2752<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01002753 <td rowspan="2">Select
2754 <td rowspan="2" style="width:200px;"> Function to select values from 2 tensors depending on an input tensor of booleans.
2755 <td rowspan="2">
2756 <ul>
2757 <li>ANEURALNETWORKS_SELECT
2758 </ul>
2759 <td>NESelect
2760 <td>
2761 <ul>
2762 <li>All
2763 </ul>
2764 <td>
2765 <table>
2766 <tr><th>src0<th>src1<th>src2<th>dst
2767 <tr><td>U8<td>All<td>All<td>All
2768 </table>
2769<tr>
2770 <td>CLSelect
2771 <td>
2772 <ul>
2773 <li>All
2774 </ul>
2775 <td>
2776 <table>
2777 <tr><th>src0<th>src1<th>src2<th>dst
2778 <tr><td>U8<td>All<td>All<td>All
2779 </table>
2780<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002781 <td rowspan="2">Slice
2782 <td rowspan="2" style="width:200px;"> Function to perform tensor slicing.
2783 <td rowspan="2">
2784 <ul>
2785 <li>ANEURALNETWORKS_SLICE
2786 </ul>
2787 <td>NESlice
2788 <td>
2789 <ul>
2790 <li>All
2791 </ul>
2792 <td>
2793 <table>
2794 <tr><th>src<th>dst
2795 <tr><td>All<td>All
2796 </table>
2797<tr>
2798 <td>CLSlice
2799 <td>
2800 <ul>
2801 <li>All
2802 </ul>
2803 <td>
2804 <table>
2805 <tr><th>src<th>dst
2806 <tr><td>All<td>All
2807 </table>
2808<tr>
Sheri Zhang6124ce62021-05-04 14:03:13 +01002809 <td rowspan="2">SoftmaxLayer
2810 <td rowspan="2" style="width:200px;"> Function to compute a SoftmaxLayer and a Log SoftmaxLayer.
2811 <td rowspan="2">
2812 <ul>
2813 <li>ANEURALNETWORKS_LOG_SOFTMAX
2814 <li>ANEURALNETWORKS_SOFTMAX
2815 </ul>
2816 <td>NESoftmaxLayerGeneric
2817 <td>
2818 <ul>
2819 <li>All
2820 </ul>
2821 <td>
2822 <table>
2823 <tr><th>src<th>dst
2824 <tr><td>QASYMM8<td>QASYMM8
2825 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2826 <tr><td>F16<td>F16
2827 <tr><td>F32<td>F32
2828 </table>
2829<tr>
2830 <td>CLSoftmaxLayerGeneric
2831 <td>
2832 <ul>
2833 <li>All
2834 </ul>
2835 <td>
2836 <table>
2837 <tr><th>src<th>dst
2838 <tr><td>QASYMM8<td>QASYMM8
2839 <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
2840 <tr><td>F16<td>F16
2841 <tr><td>F32<td>F32
2842 </table>
2843<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01002844 <td rowspan="2">SpaceToBatchLayer
2845 <td rowspan="2" style="width:200px;"> Function to divide a tensor spatially.
2846 <td rowspan="2">
2847 <ul>
2848 <li>ANEURALNETWORKS_SPACE_TO_BATCH_ND
2849 </ul>
2850 <td>NESpaceToBatchLayer
2851 <td>
2852 <ul>
2853 <li>NHWC
2854 <li>NCHW
2855 </ul>
2856 <td>
2857 <table>
2858 <tr><th>src0<th>src1<th>src2<th>dst
2859 <tr><td>All<td>S32<td>S32<td>All
2860 </table>
2861<tr>
2862 <td>CLSpaceToBatchLayer
2863 <td>
2864 <ul>
2865 <li>NHWC
2866 <li>NCHW
2867 </ul>
2868 <td>
2869 <table>
2870 <tr><th>src0<th>src1<th>src2<th>dst
2871 <tr><td>All<td>S32<td>S32<td>All
2872 </table>
2873<tr>
2874 <td rowspan="2">SpaceToDepthLayer
2875 <td rowspan="2" style="width:200px;"> Function to rearrange blocks of spatial data into depth.
2876 <td rowspan="2">
2877 <ul>
2878 <li>ANEURALNETWORKS_SPACE_TO_DEPTH
2879 </ul>
2880 <td>NESpaceToDepthLayer
2881 <td>
2882 <ul>
2883 <li>NHWC
2884 <li>NCHW
2885 </ul>
2886 <td>
2887 <table>
2888 <tr><th>src<th>dst
2889 <tr><td>All<td>All
2890 </table>
2891<tr>
2892 <td>CLSpaceToDepthLayer
2893 <td>
2894 <ul>
2895 <li>NHWC
2896 <li>NCHW
2897 </ul>
2898 <td>
2899 <table>
2900 <tr><th>src<th>dst
2901 <tr><td>All<td>All
2902 </table>
2903<tr>
2904 <td rowspan="2">Split
2905 <td rowspan="2" style="width:200px;"> Function to split a tensor along a given axis.
2906 <td rowspan="2">
2907 <ul>
2908 <li>ANEURALNETWORKS_SPLIT
2909 </ul>
2910 <td>NESplit
2911 <td>
2912 <ul>
2913 <li>All
2914 </ul>
2915 <td>
2916 <table>
2917 <tr><th>src<th>dst
2918 <tr><td>All<td>All
2919 </table>
2920<tr>
2921 <td>CLSplit
2922 <td>
2923 <ul>
2924 <li>All
2925 </ul>
2926 <td>
2927 <table>
2928 <tr><th>src<th>dst
2929 <tr><td>All<td>All
2930 </table>
2931<tr>
2932 <td rowspan="2">StackLayer
2933 <td rowspan="2" style="width:200px;"> Function to stack tensors along an axis.
2934 <td rowspan="2">
2935 <ul>
2936 <li>n/a
2937 </ul>
2938 <td>NEStackLayer
2939 <td>
2940 <ul>
2941 <li>All
2942 </ul>
2943 <td>
2944 <table>
2945 <tr><th>src<th>dst
2946 <tr><td>All<td>All
2947 </table>
2948<tr>
2949 <td>CLStackLayer
2950 <td>
2951 <ul>
2952 <li>All
2953 </ul>
2954 <td>
2955 <table>
2956 <tr><th>src<th>dst
2957 <tr><td>All<td>All
2958 </table>
2959<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +01002960 <td rowspan="2">StridedSlice
2961 <td rowspan="2" style="width:200px;"> Function to extract a strided slice of a tensor.
2962 <td rowspan="2">
2963 <ul>
2964 <li>ANEURALNETWORKS_STRIDED_SLICE
2965 </ul>
2966 <td>NEStridedSlice
2967 <td>
2968 <ul>
2969 <li>All
2970 </ul>
2971 <td>
2972 <table>
2973 <tr><th>src<th>dst
2974 <tr><td>All<td>All
2975 </table>
2976<tr>
2977 <td>CLStridedSlice
2978 <td>
2979 <ul>
2980 <li>All
2981 </ul>
2982 <td>
2983 <table>
2984 <tr><th>src<th>dst
2985 <tr><td>All<td>All
2986 </table>
2987<tr>
Teresa Charlin62687422021-04-28 10:58:49 +01002988 <td rowspan="2">Tile
2989 <td rowspan="2" style="width:200px;"> Function to construct a tensor by tiling a given tensor.
2990 <td rowspan="2">
2991 <ul>
2992 <li>ANEURALNETWORKS_TILE
2993 </ul>
2994 <td>NETile
2995 <td>
2996 <ul>
2997 <li>All
2998 </ul>
2999 <td>
3000 <table>
3001 <tr><th>src<th>dst
3002 <tr><td>All<td>All
3003 </table>
3004<tr>
3005 <td>CLTile
3006 <td>
3007 <ul>
3008 <li>All
3009 </ul>
3010 <td>
3011 <table>
3012 <tr><th>src<th>dst
3013 <tr><td>All<td>All
3014 </table>
3015<tr>
Sheri Zhanga47dcc22021-04-22 14:41:12 +01003016 <td rowspan="2">Transpose
Teresa Charlin62687422021-04-28 10:58:49 +01003017 <td rowspan="2" style="width:200px;"> Function to transpose a 2D tensor.
Sheri Zhanga47dcc22021-04-22 14:41:12 +01003018 <td rowspan="2">
3019 <ul>
3020 <li>ANEURALNETWORKS_TRANSPOSE
3021 </ul>
3022 <td>NETranspose
3023 <td>
3024 <ul>
3025 <li>All
3026 </ul>
3027 <td>
3028 <table>
3029 <tr><th>src<th>dst
3030 <tr><td>All<td>All
3031 </table>
3032<tr>
3033 <td>CLTranspose
3034 <td>
3035 <ul>
3036 <li>All
3037 </ul>
3038 <td>
3039 <table>
3040 <tr><th>src<th>dst
3041 <tr><td>All<td>All
3042 </table>
Teresa Charlin62687422021-04-28 10:58:49 +01003043<tr>
3044 <td rowspan="2">Unstack
3045 <td rowspan="2" style="width:200px;"> Function to unpack a rank-R tensor into rank-(R-1) tensors.
3046 <td rowspan="2">
3047 <ul>
3048 <li>n/a
3049 </ul>
3050 <td>NEUnstack
3051 <td>
3052 <ul>
3053 <li>All
3054 </ul>
3055 <td>
3056 <table>
3057 <tr><th>src<th>dst
3058 <tr><td>All<td>All
3059 </table>
3060<tr>
3061 <td>CLUnstack
3062 <td>
3063 <ul>
3064 <li>All
3065 </ul>
3066 <td>
3067 <table>
3068 <tr><th>src<th>dst
3069 <tr><td>All<td>All
3070 </table>
3071<tr>
3072 <td rowspan="2">WinogradConvolutionLayer
 3073 <td rowspan="2" style="width:200px;"> Function to perform Winograd convolution.
3074 <td rowspan="2">
3075 <ul>
3076 <li>ANEURALNETWORKS_CONV_2D
3077 </ul>
3078 <td>NEWinogradConvolutionLayer
3079 <td>
3080 <ul>
3081 <li>NHWC
3082 <li>NCHW
3083 </ul>
3084 <td>
3085 <table>
3086 <tr><th>src0<th>src1<th>src2<th>dst
3087 <tr><td>F16<td>F16<td>F16<td>F16
3088 <tr><td>F32<td>F32<td>F32<td>F32
3089 </table>
3090<tr>
3091 <td>CLWinogradConvolutionLayer
3092 <td>
3093 <ul>
3094 <li>NHWC
3095 <li>NCHW
3096 </ul>
3097 <td>
3098 <table>
3099 <tr><th>src0<th>src1<th>src2<th>dst
3100 <tr><td>F16<td>F16<td>F16<td>F16
3101 <tr><td>F32<td>F32<td>F32<td>F32
3102 </table>
Sheri Zhang6124ce62021-05-04 14:03:13 +01003103<tr>
3104 <td rowspan="1">WinogradInputTransform
Jakub Sujakee301b32021-06-04 09:46:08 +01003105 <td rowspan="1" style="width:200px;"> Function to perform a Winograd transform on the input tensor.
Sheri Zhang6124ce62021-05-04 14:03:13 +01003106 <td rowspan="1">
3107 <ul>
3108 <li>n/a
3109 </ul>
3110 <td>CLWinogradInputTransform
3111 <td>
3112 <ul>
3113 <li>NHWC
3114 <li>NCHW
3115 </ul>
3116 <td>
3117 <table>
3118 <tr><th>src<th>dst
3119 <tr><td>F16<td>F16
3120 <tr><td>F32<td>F32
3121 </table>
Sheri Zhanga47dcc22021-04-22 14:41:12 +01003122</table>
3123
3124*/
3125} // namespace