------ ArmNN for Android NNAPI supported operations ------

This release of ArmNN for Android supports use as a driver for the Android Neural Networks API. It implements the
android.hardware.neuralnetworks@1.0, android.hardware.neuralnetworks@1.1, android.hardware.neuralnetworks@1.2 and
android.hardware.neuralnetworks@1.3
HAL interfaces.

For more information on the Android Neural Networks API, see https://developer.android.com/ndk/guides/neuralnetworks/index.html

For integration and usage documentation, please see README.md.

--- Support for Android Neural Networks HAL operations ---

The following AndroidNN HAL 1.0, 1.1, 1.2 and 1.3 operations are currently supported:

AndroidNN operator             Tensor type supported
ABS                            (FLOAT32, FLOAT16, INT32)
ADD                            (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
ARGMAX                         (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
ARGMIN                         (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
AVERAGE_POOL_2D                (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
BATCH_TO_SPACE_ND              (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
CAST                           (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM)
CONCATENATION                  (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
CHANNEL_SHUFFLE                (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
CONV_2D                        (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
DEPTH_TO_SPACE                 (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
DEPTHWISE_CONV_2D              (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
DEQUANTIZE                     (FLOAT32 (output only), QUANT8_ASYMM and QUANT8_ASYMM_SIGNED (input only))
DIV                            (FLOAT32, FLOAT16, INT32)
ELU                            (FLOAT32, FLOAT16, QUANT8_ASYMM)
EQUAL                          (BOOL8, FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
EXP                            (FLOAT32, FLOAT16)
EXPAND_DIMS                    (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
FILL                           (FLOAT32, FLOAT16, INT32)
FLOOR                          (FLOAT32, FLOAT16)
FULLY_CONNECTED                (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
GREATER                        (BOOL8, FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
GREATER_EQUAL                  (BOOL8, FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
GROUPED_CONV_2D                (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
HARD_SWISH                     (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
INSTANCE_NORMALIZATION         (FLOAT32, FLOAT16)
L2_NORMALIZATION               (FLOAT32)
L2_POOL_2D                     (FLOAT32, FLOAT16)
LESS                           (BOOL8, FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
LESS_EQUAL                     (BOOL8, FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
LOCAL_RESPONSE_NORMALIZATION   (FLOAT32)
LOG                            (FLOAT32, FLOAT16)
LOGICAL_AND                    (BOOL8)
LOGICAL_NOT                    (BOOL8)
LOGICAL_OR                     (BOOL8)
LOGISTIC                       (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
LOG_SOFTMAX                    (FLOAT32, FLOAT16)
LSTM                           (FLOAT32)
MAXIMUM                        (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
MAX_POOL_2D                    (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
MEAN                           (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
MINIMUM                        (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
MUL                            (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
NEG                            (FLOAT32, FLOAT16)
NOT_EQUAL                      (BOOL8, FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
PAD                            (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
PAD_V2                         (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
PRELU                          (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
QUANTIZE                       (FLOAT32 (input only), QUANT8_ASYMM and QUANT8_ASYMM_SIGNED (output only))
QUANTIZED_16BIT_LSTM           (QUANT8_ASYMM)
QUANTIZED_LSTM                 (QUANT8_ASYMM)
REDUCE_MAX                     (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
REDUCE_MIN                     (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
REDUCE_PROD                    (FLOAT32, FLOAT16)
REDUCE_SUM                     (FLOAT32, FLOAT16)
RELU                           (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RELU1                          (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RELU6                          (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RESHAPE                        (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RESIZE_BILINEAR                (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RESIZE_NEAREST_NEIGHBOR        (FLOAT32, FLOAT16, QUANT8_ASYMM)
RSQRT                          (FLOAT32, FLOAT16)
SIN                            (FLOAT32, FLOAT16)
SOFTMAX                        (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
SPACE_TO_BATCH_ND              (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
SPACE_TO_DEPTH                 (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
SQRT                           (FLOAT32, FLOAT16)
SQUEEZE                        (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
STRIDED_SLICE                  (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
SUB                            (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
TANH                           (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
TRANSPOSE                      (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
TRANSPOSE_CONV_2D              (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
UNIDIRECTIONAL_SEQUENCE_LSTM   (FLOAT32, FLOAT16)

Where operations are not supported by the ArmNN Android NN Driver, the driver indicates this to the framework
appropriately and the framework implements those operations using a CPU implementation.

NOTE: By convention, only the tensor types that are fully supported across all ArmNN backends are listed above.
 - FLOAT16 input tensors are partially supported on most HAL 1.2 and 1.3 operators on the GpuAcc and
   CpuRef backends, however not on CpuAcc.