/aosp_15_r20/external/pytorch/test/
  test_xnnpack_integration.py
    142: xnnpack_result = torch.ops.prepacked.conv2d_clamp_run(
    369: return torch.ops.prepacked.conv2d_clamp_run(x, self.packed_weight_bias)
    669: o = torch.ops.prepacked.conv2d_clamp_run(
    938: "prepacked::conv2d_clamp_run": 1,
    1004: "prepacked::conv2d_clamp_run": 1,
    1022: "prepacked::conv2d_clamp_run": 1,
    1044: "prepacked::conv2d_clamp_run": 1,
    1066: "prepacked::conv2d_clamp_run": 1,
    1088: "prepacked::conv2d_clamp_run": 1,
    1385: "prepacked::conv2d_clamp_run": 1,
    [all …]
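For context, the test usages above call the op pair directly: weights and bias are first packed into an xnnpack.Conv2dOpContext by prepacked::conv2d_clamp_prepack, and prepacked::conv2d_clamp_run then consumes the input plus the packed context. A minimal sketch, assuming a PyTorch build with XNNPACK enabled; the shapes and clamp bounds are illustrative, not taken from the test file:

import torch

# Illustrative sizes only.
x = torch.randn(1, 3, 32, 32)
weight = torch.randn(8, 3, 3, 3)
bias = torch.randn(8)

# Pack weight/bias together with the conv parameters and optional clamp
# bounds into an xnnpack.Conv2dOpContext.
packed = torch.ops.prepacked.conv2d_clamp_prepack(
    weight, bias,
    [1, 1],   # stride
    [1, 1],   # padding
    [1, 1],   # dilation
    1,        # groups
    0.0,      # output_min (lower clamp bound)
    6.0,      # output_max (upper clamp bound)
)

# Run the prepacked, clamped convolution.
xnnpack_result = torch.ops.prepacked.conv2d_clamp_run(x, packed)

# Compare against the eager reference, clamped to the same bounds.
reference = torch.clamp(
    torch.nn.functional.conv2d(x, weight, bias, stride=1, padding=1),
    0.0, 6.0)
print(torch.allclose(xnnpack_result, reference, atol=1e-3, rtol=1e-3))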
  test_vulkan.py
    85: "vulkan_prepack::conv2d_clamp_run": 1}
    107: "vulkan_prepack::conv2d_clamp_run": 1}
    146: "vulkan_prepack::conv2d_clamp_run": 1}
  test_mobile_optimizer.py
    114: .check_count("prepacked::conv2d_clamp_run", 1, exactly=True) \
    126: .check_count("prepacked::conv2d_clamp_run", 1, exactly=True) \
    142: .check_not("prepacked::conv2d_clamp_run") \
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/
  xnnpack_rewrite.cpp
    132: %res = prepacked::conv2d_clamp_run(%input, %packed_weight_bias)  (in insertPrePackedConv2dOp())
    187: %res = prepacked::conv2d_clamp_run(%input, %packed_weight_bias)  (in fuseHardtanhWithPackedOps())
    212: %conv2d_res = prepacked::conv2d_clamp_run(%input, %packed_weight_bias)  (in fuseHardtanhWithPackedOps())
    238: %conv2d_res = prepacked::conv2d_clamp_run(%input, %packed_weight_bias)  (in fuseHardtanhWithPackedOps())
    281: %res = prepacked::conv2d_clamp_run(%input, %packed_weight_bias)  (in fuseReluWithPackedOps())
    307: %conv2d_res = prepacked::conv2d_clamp_run(%input, %packed_weight_bias)  (in fuseReluWithPackedOps())
    334: %conv2d_res = prepacked::conv2d_clamp_run(%input, %packed_weight_bias)  (in fuseReluWithPackedOps())
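This rewrite pass is what torch.utils.mobile_optimizer.optimize_for_mobile applies to a scripted module on the default CPU backend: aten::conv2d (optionally followed by hardtanh or relu) is replaced by a constant-folded prepack plus a prepacked::conv2d_clamp_run call, with the activation absorbed into the clamp bounds. A rough sketch of observing this from Python, assuming a build with XNNPACK; the module definition is illustrative:

import torch
from torch.utils.mobile_optimizer import optimize_for_mobile

class ConvRelu(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1)

    def forward(self, x):
        # aten::conv2d followed by aten::relu; the XNNPACK rewrite fuses the
        # pair into one prepacked::conv2d_clamp_run with output_min = 0.
        return torch.relu(self.conv(x))

scripted = torch.jit.script(ConvRelu().eval())
optimized = optimize_for_mobile(scripted)

# The optimized forward graph should now reference the prepacked op instead
# of aten::conv2d / aten::relu.
print("prepacked::conv2d_clamp_run" in str(optimized.graph))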
/aosp_15_r20/external/pytorch/aten/src/ATen/native/xnnpack/
  RegisterOpContextClass.cpp
    82: …m.def(TORCH_SELECTIVE_SCHEMA("prepacked::conv2d_clamp_run(Tensor X, __torch__.torch.classes.xnnpac…  (in TORCH_LIBRARY())
    91: …m.impl(TORCH_SELECTIVE_NAME("prepacked::conv2d_clamp_run"), TORCH_FN(internal::convolution2d::conv…  (in TORCH_LIBRARY_IMPL())
  Convolution.h
    35: Tensor conv2d_clamp_run(
  Convolution.cpp
    440: Tensor conv2d_clamp_run(  (in conv2d_clamp_run() function)
/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/ops/
  Register.cpp
    179: "vulkan_prepack::conv2d_clamp_run(Tensor X, "  (in TORCH_LIBRARY())
    313: TORCH_SELECTIVE_NAME("vulkan_prepack::conv2d_clamp_run"),  (in TORCH_LIBRARY_IMPL())
    314: TORCH_FN(conv2d_clamp_run)); // Backwards compatibility  (in TORCH_LIBRARY_IMPL())
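The Vulkan registrations mirror the XNNPACK ones and are exercised by optimize_for_mobile with the Vulkan backend, which rewrites aten::conv2d into vulkan_prepack:: ops (the conv2d_clamp_run impl above is kept for backwards compatibility). A sketch, assuming a PyTorch build with USE_VULKAN enabled; the model is illustrative:

import torch
from torch.utils.mobile_optimizer import optimize_for_mobile

model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 8, kernel_size=3, padding=1),
    torch.nn.ReLU(),
).eval()
scripted = torch.jit.script(model)

# Rewrite for the Vulkan GPU backend instead of the default CPU/XNNPACK path.
vulkan_optimized = optimize_for_mobile(scripted, backend="Vulkan")

# The forward graph now calls vulkan_prepack:: conv ops instead of
# aten::conv2d; whether it is conv2d_clamp_run or the newer
# run_conv2d_context depends on the PyTorch revision.
print(vulkan_optimized.graph)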
  Convolution.h
    217: Tensor conv2d_clamp_run(
  Convolution.cpp
    1321: Tensor conv2d_clamp_run(  (in conv2d_clamp_run() function)
/aosp_15_r20/external/pytorch/test/jit/
  test_optimize_for_mobile_preserve_debug_info.py
    136: "prepacked::conv2d_clamp_run": "aten::conv2d",
    228: "prepacked::conv2d_clamp_run": conv2d_activation_kind,
/aosp_15_r20/external/pytorch/torch/_export/passes/
  replace_quantized_ops_with_standard_ops_pass.py
    431: if opname == "conv2d_clamp_run":
    444: if opname == "conv2d_clamp_run":
    557: …For prepacked::conv2d_clamp_run and prepacked::linear_clamp_run, we directly convert them to aten.…
/aosp_15_r20/external/pytorch/test/mobile/model_test/
  coverage.yaml
    647: - prepacked::conv2d_clamp_run
    1016: prepacked::conv2d_clamp_run: 32
  model_ops.yaml
    395: prepacked::conv2d_clamp_run: 41
/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/
  symbolic_shape_registry.cpp
    63: …{"prepacked::conv2d_clamp_run(Tensor X, __torch__.torch.classes.xnnpack.Conv2dOpContext W_prepack)…  (in conditionally_defined_ops())
/aosp_15_r20/external/pytorch/torch/csrc/jit/tensorexpr/
  lowerings.cpp
    37: …{"prepacked::conv2d_clamp_run(Tensor X, __torch__.torch.classes.xnnpack.Conv2dOpContext W_prepack)…  (in nnc_lowerings_lazy_registration())
/aosp_15_r20/external/pytorch/aten/src/ATen/test/
  vulkan_api_test.cpp
    1540: "vulkan_prepack::conv2d_clamp_run",  (in test_backwards_compatible_conv2d_context())
    2142: "prepacked::conv2d_clamp_run",  (in TEST_F())