------ ArmNN for Android NNAPI supported operations ------

This release of ArmNN for Android supports use as a driver for the Android Neural Networks API. It implements the
android.hardware.neuralnetworks@1.0, android.hardware.neuralnetworks@1.1 and android.hardware.neuralnetworks@1.2
HAL interfaces.

For more information on the Android Neural Networks API, see https://developer.android.com/ndk/guides/neuralnetworks/index.html

For integration and usage documentation, please see README.md.

--- Support for Android Neural Networks HAL operations ---

The following AndroidNN HAL 1.0, 1.1 and 1.2 operations are currently supported:

AndroidNN operator           Tensor type supported
ABS                          (FLOAT32)
ADD                          (FLOAT32, QUANT8_ASYMM)
AVERAGE_POOL_2D              (FLOAT32, QUANT8_ASYMM)
BATCH_TO_SPACE_ND            (FLOAT32, QUANT8_ASYMM)
CONCATENATION                (FLOAT32, QUANT8_ASYMM)
CONV_2D                      (FLOAT32, QUANT8_ASYMM, QUANT8_SYMM_PER_CHANNEL (only for weights))
DEPTH_TO_SPACE               (FLOAT32, FLOAT16, QUANT8_ASYMM)
DEPTHWISE_CONV_2D            (FLOAT32, QUANT8_ASYMM, QUANT8_SYMM_PER_CHANNEL (only for weights))
DEQUANTIZE                   (FLOAT32 (output only), QUANT8_ASYMM (input only))
DIV                          (FLOAT32, QUANT8_ASYMM)
EQUAL                        (FLOAT32, QUANT8_ASYMM)
EXPAND_DIMS                  (FLOAT32, FLOAT16, QUANT8_ASYMM)
FLOOR                        (FLOAT32)
FULLY_CONNECTED              (FLOAT32, QUANT8_ASYMM)
GREATER                      (FLOAT32, QUANT8_ASYMM)
GREATER_EQUAL                (FLOAT32, QUANT8_ASYMM)
GROUPED_CONV_2D              (FLOAT32, QUANT8_ASYMM, QUANT8_SYMM_PER_CHANNEL (only for weights))
INSTANCE_NORMALIZATION       (FLOAT32)
L2_NORMALIZATION             (FLOAT32)
L2_POOL_2D                   (FLOAT32, QUANT8_ASYMM)
LESS                         (FLOAT32, QUANT8_ASYMM)
LESS_EQUAL                   (FLOAT32, QUANT8_ASYMM)
LOCAL_RESPONSE_NORMALIZATION (FLOAT32)
LOGISTIC                     (FLOAT32, QUANT8_ASYMM)
LOG_SOFTMAX                  (FLOAT32)
LSTM                         (FLOAT32)
MAXIMUM                      (FLOAT32, QUANT8_ASYMM)
MAX_POOL_2D                  (FLOAT32, QUANT8_ASYMM)
MEAN                         (FLOAT32, QUANT8_ASYMM)
MINIMUM                      (FLOAT32, QUANT8_ASYMM)
MUL                          (FLOAT32, QUANT8_ASYMM)
NOT_EQUAL                    (FLOAT32, QUANT8_ASYMM)
PAD                          (FLOAT32, QUANT8_ASYMM)
PAD_V2                       (FLOAT32, QUANT8_ASYMM)
PRELU                        (FLOAT32, QUANT8_ASYMM)
QUANTIZE                     (FLOAT32 (input only), QUANT8_ASYMM (output only))
QUANTIZED_16BIT_LSTM         (QUANT8_ASYMM)
RELU                         (FLOAT32, QUANT8_ASYMM)
RELU1                        (FLOAT32, QUANT8_ASYMM)
RELU6                        (FLOAT32, QUANT8_ASYMM)
RESHAPE                      (FLOAT32, QUANT8_ASYMM)
RESIZE_BILINEAR              (FLOAT32, QUANT8_ASYMM)
RESIZE_NEAREST_NEIGHBOR      (FLOAT32, QUANT8_ASYMM)
RSQRT                        (FLOAT32)
SOFTMAX                      (FLOAT32, QUANT8_ASYMM)
SPACE_TO_BATCH_ND            (FLOAT32, QUANT8_ASYMM)
SPACE_TO_DEPTH               (FLOAT32, QUANT8_ASYMM)
SQRT                         (FLOAT32)
SQUEEZE                      (FLOAT32, QUANT8_ASYMM)
STRIDED_SLICE                (FLOAT32, QUANT8_ASYMM)
SUB                          (FLOAT32, QUANT8_ASYMM)
TANH                         (FLOAT32, QUANT8_ASYMM)
TRANSPOSE                    (FLOAT32, QUANT8_ASYMM)
TRANSPOSE_CONV_2D            (FLOAT32, QUANT8_ASYMM, QUANT8_SYMM_PER_CHANNEL (only for weights))

Where operations are not supported by the ArmNN Android NN Driver, the driver indicates this to the framework
appropriately and the framework implements those operations using a CPU implementation.

NOTE: By convention, only those tensor types have been listed above which are fully supported across all
ArmNN backends. FLOAT16 input tensors are partially supported on most HAL 1.2 operators on the GpuAcc and
CpuRef backends, however not on CpuAcc.