/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/test/
global-average-pooling.cc | ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize()); in TEST() at lines 16, 25, 43, 62, 84, 106, 128, 150, 169, 186 [all …]
average-pooling.cc | ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize()); in TEST() at lines 16, 28, 47, 69, 89, 108, 130, 152, 183, 212 [all …]
max-pooling.cc | ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize()); in TEST() at lines 16, 28, 47, 71, 91, 111, 130, 154, 174, 194 [all …]
convolution.cc | ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize()); in TEST() at lines 106, 119, 133, 147, 161, 175, 189, 203, 635, 649 [all …]
channel-shuffle-operator-tester.h | ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize()); at line 108 in testX8()
max-pooling-operator-tester.h | ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize()); at line 422 in testU8() and line 560 in testSetupU8()
fully-connected-sparse-operator-tester.h | ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize()); at line 294 in testQ8() and line 495 in testQ8_prepacked()
softargmax-operator-tester.h | ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize()); at line 145 in testQ8()
clamp-operator-tester.h | ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize()); at line 122 in testU8()
average-pooling-operator-tester.h | ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize()); at line 442 in testQ8() and line 606 in testSetupQ8()
sigmoid-operator-tester.h | ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize()); at line 157 in testQ8()
hardsigmoid-operator-tester.h | ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize()); at line 158 in testQ8()
tanh-operator-tester.h | ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize()); at line 157 in testQ8()
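Every test call site above uses the same guard: initialize QNNPACK once at the top of the test body and assert success before constructing any operator. A minimal sketch of that recurring pattern, assuming gtest and the pytorch_qnnpack.h header are available (the test name and the operator body are illustrative, not taken from the files above):

    #include <gtest/gtest.h>
    #include <pytorch_qnnpack.h>

    TEST(EXAMPLE_Q8_OP, init_guard) {
      // All later pytorch_qnnp_create_* / setup / run calls are only valid
      // after initialization returns pytorch_qnnp_status_success.
      ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize());
      // ... create, setup, and run the operator under test here ...
    }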
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/bench/ |
add.cc | pytorch_qnnp_status status = pytorch_qnnp_initialize(); at line 33 in add_nc_q8() and line 101 in add_nc_q8_inplace()
global-average-pooling.cc | pytorch_qnnp_status status = pytorch_qnnp_initialize(); at line 40 in global_average_pooling_q8()
softargmax.cc | pytorch_qnnp_status status = pytorch_qnnp_initialize(); at line 32 in softargmax_q8()
tanh.cc | pytorch_qnnp_status status = pytorch_qnnp_initialize(); at line 32 in tanh_q8()
hardswish.cc | pytorch_qnnp_status status = pytorch_qnnp_initialize(); at line 32 in hardswish_q8()
hardsigmoid.cc | pytorch_qnnp_status status = pytorch_qnnp_initialize(); at line 32 in hardsigmoid_q8()
sigmoid.cc | pytorch_qnnp_status status = pytorch_qnnp_initialize(); at line 32 in sigmoid_q8()
max-pooling.cc | pytorch_qnnp_status status = pytorch_qnnp_initialize(); at line 49 in max_pooling_u8()
average-pooling.cc | pytorch_qnnp_status status = pytorch_qnnp_initialize(); at line 49 in average_pooling_q8()
channel-shuffle.cc | pytorch_qnnp_status status = pytorch_qnnp_initialize(); at line 32 in channel_shuffle_x8()
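The benchmark call sites take the same step but store the returned status instead of asserting, so the benchmark can bail out rather than abort when QNNPACK cannot run on the host CPU. A rough sketch of that shape, under the assumption that Google Benchmark is the harness (the function name and skip message are illustrative):

    #include <benchmark/benchmark.h>
    #include <pytorch_qnnpack.h>

    static void example_q8(benchmark::State& state) {
      // Keep the status so the benchmark can be skipped gracefully.
      const pytorch_qnnp_status status = pytorch_qnnp_initialize();
      if (status != pytorch_qnnp_status_success) {
        state.SkipWithError("failed to initialize QNNPACK");
        return;
      }
      for (auto _ : state) {
        // ... operator creation, setup, and run being measured ...
      }
    }
    BENCHMARK(example_q8);
    BENCHMARK_MAIN();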
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/ |
init_qnnpack.cpp | c10::call_once(once, []() { qnnpackStatus = pytorch_qnnp_initialize(); }); at line 16 in initQNNPACK()
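initQNNPACK() guards the call with c10::call_once so the library is initialized exactly once per process before any quantized CPU operator uses it. A simplified sketch of the same idiom using std::call_once instead of the c10 helper, with the ATen error handling paraphrased as a plain exception:

    #include <mutex>
    #include <stdexcept>
    #include <pytorch_qnnpack.h>

    void initQNNPACK() {
      static std::once_flag once;
      static pytorch_qnnp_status qnnpackStatus = pytorch_qnnp_status_uninitialized;
      // Run pytorch_qnnp_initialize() at most once per process; concurrent
      // callers block until the first invocation completes.
      std::call_once(once, []() { qnnpackStatus = pytorch_qnnp_initialize(); });
      if (qnnpackStatus != pytorch_qnnp_status_success) {
        throw std::runtime_error("failed to initialize QNNPACK");
      }
    }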
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/ |
README.md | line 46: …`pytorch_qnnp_initialize` will fail with `pytorch_qnnp_status_unsupported_hardware` if the mobile C…
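That README note is why every call site above checks the result: on hardware the library does not support, initialization returns pytorch_qnnp_status_unsupported_hardware instead of succeeding. A hedged sketch of how a caller might branch on that case (the helper name and fallback message are illustrative):

    #include <cstdio>
    #include <pytorch_qnnpack.h>

    bool tryInitQNNPACK() {
      const pytorch_qnnp_status status = pytorch_qnnp_initialize();
      if (status == pytorch_qnnp_status_unsupported_hardware) {
        // This CPU is not supported by QNNPACK; use another backend instead.
        std::fprintf(stderr, "QNNPACK: unsupported hardware, falling back\n");
        return false;
      }
      return status == pytorch_qnnp_status_success;
    }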