------ ArmNN for Android NNAPI supported operations ------

This release of ArmNN for Android supports use as a driver for the Android Neural Networks API. It implements the
android.hardware.neuralnetworks@1.0, android.hardware.neuralnetworks@1.1, android.hardware.neuralnetworks@1.2 and
android.hardware.neuralnetworks@1.3 HAL interfaces.

For more information on the Android Neural Networks API, see https://developer.android.com/ndk/guides/neuralnetworks/index.html

For integration and usage documentation, please see README.md.

--- Support for Android Neural Networks HAL operations ---

The following AndroidNN HAL 1.0, 1.1, 1.2 and 1.3 operations are currently supported:

AndroidNN operator           Tensor type supported
ABS                          (FLOAT32, FLOAT16, INT32)
ADD                          (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
ARGMAX                       (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
ARGMIN                       (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
AVERAGE_POOL_2D              (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
BATCH_TO_SPACE_ND            (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
CAST                         (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM)
CONCATENATION                (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
CHANNEL_SHUFFLE              (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
CONV_2D                      (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
DEPTH_TO_SPACE               (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
DEPTHWISE_CONV_2D            (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
DEQUANTIZE                   (FLOAT32 (output only), QUANT8_ASYMM and QUANT8_ASYMM_SIGNED (input only))
DIV                          (FLOAT32, FLOAT16, INT32)
ELU                          (FLOAT32, FLOAT16, QUANT8_ASYMM)
EQUAL                        (BOOL8, FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
EXP                          (FLOAT32, FLOAT16)
EXPAND_DIMS                  (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
FILL                         (FLOAT32, FLOAT16, INT32)
FLOOR                        (FLOAT32, FLOAT16)
FULLY_CONNECTED              (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
GREATER                      (BOOL8, FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
GREATER_EQUAL                (BOOL8, FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
GROUPED_CONV_2D              (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
HARD_SWISH                   (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
INSTANCE_NORMALIZATION       (FLOAT32, FLOAT16)
L2_NORMALIZATION             (FLOAT32)
L2_POOL_2D                   (FLOAT32, FLOAT16)
LESS                         (BOOL8, FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
LESS_EQUAL                   (BOOL8, FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
LOCAL_RESPONSE_NORMALIZATION (FLOAT32)
LOG                          (FLOAT32, FLOAT16)
LOGICAL_AND                  (BOOL8)
LOGICAL_NOT                  (BOOL8)
LOGICAL_OR                   (BOOL8)
LOGISTIC                     (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
LOG_SOFTMAX                  (FLOAT32, FLOAT16)
LSTM                         (FLOAT32)
MAXIMUM                      (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
MAX_POOL_2D                  (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
MEAN                         (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
MINIMUM                      (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
MUL                          (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
NEG                          (FLOAT32, FLOAT16)
NOT_EQUAL                    (BOOL8, FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
PAD                          (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
PAD_V2                       (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
PRELU                        (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
QUANTIZE                     (FLOAT32 (input only), QUANT8_ASYMM and QUANT8_ASYMM_SIGNED (output only))
QUANTIZED_16BIT_LSTM         (QUANT8_ASYMM)
QUANTIZED_LSTM               (QUANT8_ASYMM)
REDUCE_MAX                   (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
REDUCE_MIN                   (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
REDUCE_PROD                  (FLOAT32, FLOAT16)
REDUCE_SUM                   (FLOAT32, FLOAT16)
RELU                         (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RELU1                        (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RELU6                        (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RESHAPE                      (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RESIZE_BILINEAR              (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RESIZE_NEAREST_NEIGHBOR      (FLOAT32, FLOAT16, QUANT8_ASYMM)
RSQRT                        (FLOAT32, FLOAT16)
SIN                          (FLOAT32, FLOAT16)
SOFTMAX                      (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
SPACE_TO_BATCH_ND            (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
SPACE_TO_DEPTH               (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
SQRT                         (FLOAT32, FLOAT16)
SQUEEZE                      (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
STRIDED_SLICE                (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
SUB                          (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
TANH                         (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
TRANSPOSE                    (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
TRANSPOSE_CONV_2D            (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
UNIDIRECTIONAL_SEQUENCE_LSTM (FLOAT32, FLOAT16)

Where operations are not supported by the ArmNN Android NN Driver, the driver indicates this to the framework and
the framework runs those operations on its own CPU implementation instead. A minimal, illustrative sketch of how an
application can query this per-operation support is given after the note below.

NOTE: By convention, only those tensor types that are fully supported across all ArmNN backends are listed above.
 - FLOAT16 input tensors are partially supported on most HAL 1.2 and 1.3 operators on the GpuAcc and
   CpuRef backends, but not on CpuAcc.
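The sketch below is illustrative only and is not part of the ArmNN driver or this repository. It shows how an
application can use the NNAPI NDK (API level 29 or later, linked against libneuralnetworks) to see which operations
of an already-built model a particular driver accepts; everything else is handled by the framework's CPU fallback.
The helper name PrintSupportedOperations and the device-name substring "armnn" are assumptions made for this
example; check the device names reported on your platform.

// Illustrative sketch only: queries which operations of a finished NNAPI model a given
// driver claims to support. Operations reported as unsupported are the ones the framework
// will execute on its CPU implementation instead of the driver.
#include <android/NeuralNetworks.h>   // NNAPI NDK, requires API level 29+

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <memory>

// 'model' must already have been finished with ANeuralNetworksModel_finish.
// 'operationCount' is the number of operations the application added to the model.
// 'driverName' is an assumed substring of the registered device name (e.g. "armnn").
bool PrintSupportedOperations(const ANeuralNetworksModel* model,
                              uint32_t operationCount,
                              const char* driverName)
{
    uint32_t deviceCount = 0;
    if (ANeuralNetworks_getDeviceCount(&deviceCount) != ANEURALNETWORKS_NO_ERROR)
    {
        return false;
    }

    for (uint32_t i = 0; i < deviceCount; ++i)
    {
        ANeuralNetworksDevice* device = nullptr;
        const char* name = nullptr;
        if (ANeuralNetworks_getDevice(i, &device) != ANEURALNETWORKS_NO_ERROR ||
            ANeuralNetworksDevice_getName(device, &name) != ANEURALNETWORKS_NO_ERROR)
        {
            continue;
        }
        if (std::strstr(name, driverName) == nullptr)
        {
            continue; // not the driver we are interested in
        }

        // One bool per operation, in the order the operations were added to the model.
        std::unique_ptr<bool[]> supported(new bool[operationCount]());
        const ANeuralNetworksDevice* devices[] = { device };
        if (ANeuralNetworksModel_getSupportedOperationsForDevices(model, devices, 1,
                                                                  supported.get())
            != ANEURALNETWORKS_NO_ERROR)
        {
            return false;
        }

        for (uint32_t op = 0; op < operationCount; ++op)
        {
            std::printf("operation %u: %s\n", op,
                        supported[op] ? "runs on this driver"
                                      : "falls back to the framework CPU implementation");
        }
        return true;
    }
    return false; // no device matching 'driverName' was found
}

In normal use this query is not required: creating an ANeuralNetworksCompilation for the model lets the framework
partition it automatically, scheduling supported operations on the driver and the rest on its CPU implementation.
ANeuralNetworksCompilation_createForDevices can be used instead to restrict execution to a specific device.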