Search hits for the ATen operator "flatten.using_ints" across the aosp_15_r20 tree:

/aosp_15_r20/external/pytorch/test/distributed/_composable/
    test_replicate_with_compiler.py
        351: fc.check("aten.flatten.using_ints(").check("cpp_fused_").check(
        363: fc.check("aten.flatten.using_ints(").check("cpp_fused_").check(

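These two hits are FileCheck assertions over the code Inductor generates when compiling a replicated module: the test requires that the flatten op and a fused C++ kernel appear in the output, in that order. A minimal sketch of the same FileCheck pattern (the generated string below is made up; only torch.testing.FileCheck is assumed):

    from torch.testing import FileCheck

    # Stand-in for Inductor output; the real test checks actual generated code.
    generated = """
    buf0 = aten.flatten.using_ints(arg0, 0, 1)
    cpp_fused_mul_0(buf0)
    """

    # Chained .check() calls assert these substrings appear in this order.
    FileCheck().check("aten.flatten.using_ints(").check("cpp_fused_").run(generated)
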
/aosp_15_r20/external/pytorch/aten/src/ATen/native/metal/ops/
    MetalReshape.mm
        107: m.impl(TORCH_SELECTIVE_NAME("aten::flatten.using_ints"), TORCH_FN(flatten_using_ints));

/aosp_15_r20/external/executorch/backends/arm/quantizer/
    arm_quantizer_utils.py
        152: torch.ops.aten.flatten.using_ints,

/aosp_15_r20/external/pytorch/test/mobile/model_test/
    coverage.yaml
        233: - aten::flatten.using_ints
        815: aten::flatten.using_ints: 45
    model_ops.yaml
        149: aten::flatten.using_ints: 74

/aosp_15_r20/external/pytorch/torch/ao/quantization/pt2e/
    port_metadata_pass.py
        109: torch.ops.aten.flatten.using_ints,

/aosp_15_r20/external/pytorch/torch/ao/quantization/quantizer/
    x86_inductor_quantizer.py
        89: torch.ops.aten.flatten.using_ints,
        218: torch.ops.aten.flatten.using_ints,
    xnnpack_quantizer_utils.py
        1016: torch.ops.aten.flatten.using_ints,

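In both quantizers the op appears in lists of shape/propagation ops that pass quantization parameters through rather than getting their own observers. A sketch of the surrounding PT2E flow, assuming a torch.export-based capture (the module and shapes here are made up):

    import torch
    from torch.ao.quantization.quantize_pt2e import prepare_pt2e, convert_pt2e
    from torch.ao.quantization.quantizer.x86_inductor_quantizer import (
        X86InductorQuantizer,
        get_default_x86_inductor_quantization_config,
    )

    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 8, 3)

        def forward(self, x):
            # Shows up as aten.flatten.using_ints in the exported graph.
            return torch.flatten(self.conv(x), 1)

    example_inputs = (torch.randn(1, 3, 16, 16),)
    m = torch.export.export_for_training(M(), example_inputs).module()

    quantizer = X86InductorQuantizer().set_global(
        get_default_x86_inductor_quantization_config()
    )
    m = prepare_pt2e(m, quantizer)
    m(*example_inputs)  # calibration pass
    m = convert_pt2e(m)
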
/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/
    BatchRulesDecompositions.cpp
        129: OP_DECOMPOSE2(flatten, using_ints); in TORCH_LIBRARY_IMPL()

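OP_DECOMPOSE2(flatten, using_ints) makes functorch run the op's composite decomposition instead of a dedicated batching rule, which is how vmap handles torch.flatten (the OpInfo comments further down record older "Batching rule not implemented" xfails). A minimal sketch using only public vmap behavior:

    import torch

    x = torch.randn(8, 2, 3, 4)  # a batch of 8 tensors of shape (2, 3, 4)

    # Per-example flatten of dims 0..1; vmap keeps the batch dim in front.
    out = torch.vmap(lambda t: torch.flatten(t, 0, 1))(x)
    assert out.shape == (8, 6, 4)

    # Matches flattening each example by hand.
    ref = torch.stack([torch.flatten(t, 0, 1) for t in x])
    assert torch.equal(out, ref)
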
/aosp_15_r20/external/pytorch/torch/csrc/jit/mobile/model_tracer/
    TracerRunner.cpp
        40: "aten::flatten.using_ints",

/aosp_15_r20/external/pytorch/torch/_inductor/fx_passes/
    ddp_fusion.py
        197: call_function(graph, aten.flatten.using_ints, (input_node,))

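ddp_fusion.py inserts the node through a local call_function helper; the same node can be built with the stock torch.fx API. A minimal sketch (only plain FX is assumed, not the pass's helpers):

    import torch
    import torch.fx as fx

    g = fx.Graph()
    x = g.placeholder("x")
    # Equivalent of: call_function(graph, aten.flatten.using_ints, (input_node,))
    flat = g.call_function(torch.ops.aten.flatten.using_ints, (x,))
    g.output(flat)

    gm = fx.GraphModule(torch.nn.Module(), g)
    assert gm(torch.randn(2, 3, 4)).shape == (24,)  # defaults flatten all dims
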
/aosp_15_r20/external/executorch/backends/qualcomm/quantizer/
    annotators.py
        663: @register_annotator([torch.ops.aten.flatten.using_ints])

/aosp_15_r20/external/pytorch/aten/src/ATen/core/
    NamedRegistrations.cpp
        190: m.impl("flatten.using_ints", CppFunction::makeFallthrough()); in TORCH_LIBRARY_IMPL()

/aosp_15_r20/external/pytorch/torch/jit/
    _shape_functions.py
        1356: "aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)",

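The registered shape function maps the input sizes plus start_dim/end_dim to the output sizes. A standalone sketch of the same arithmetic (not the code from _shape_functions.py, just the rule it implements):

    def flatten_shape(sizes, start_dim=0, end_dim=-1):
        ndim = len(sizes)
        if ndim == 0:
            return [1]  # flattening a scalar yields a 1-element 1-D tensor
        start = start_dim % ndim  # wrap negative dims
        end = end_dim % ndim
        assert start <= end, "start_dim must come before end_dim"
        flat = 1
        for s in sizes[start:end + 1]:
            flat *= s
        return sizes[:start] + [flat] + sizes[end + 1:]

    assert flatten_shape([2, 3, 4, 5], 1, 2) == [2, 12, 5]
    assert flatten_shape([2, 3, 4, 5]) == [120]
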
/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/static/
    native_ops.cpp
        403: … "aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)"))) { in __anon75e5f0512702()
    passes.cpp
        406: … "static_runtime::flatten_copy.using_ints(Tensor self, int start_dim=0, int end_dim=-1) -> Tensor", in TORCH_LIBRARY_FRAGMENT()
    ops.cpp
        1708: … "static_runtime::flatten_copy.using_ints(Tensor self, int start_dim=0, int end_dim=-1) -> Tensor")… in __anon11f46a8b4802()

/aosp_15_r20/external/pytorch/torch/csrc/jit/tensorexpr/
    lowerings.cpp
        1823: {"aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> (Tensor(a))"}, in nnc_lowerings_lazy_registration()

/aosp_15_r20/external/pytorch/test/quantization/pt2e/
    test_x86inductor_quantizer.py
        955: torch.ops.aten.flatten.using_ints,

/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/
    serialized_shape_function_registry.cpp
        3299: … {"aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)", "flatte… in GetShapeFunctionMappings()

/aosp_15_r20/external/pytorch/torch/testing/_internal/
    common_methods_invocations.py
        14747: # got: Batching rule not implemented for aten::flatten.using_ints
        14768: # got: Batching rule not implemented for aten::flatten.using_ints
        14792: # got: Batching rule not implemented for aten::flatten.using_ints
        15642: # got: Batching rule not implemented for aten::flatten.using_ints
        15668: # got: Batching rule not implemented for aten::flatten.using_ints
        15707: # got: Batching rule not implemented for aten::flatten.using_ints

/aosp_15_r20/external/pytorch/aten/src/ATen/native/
    native_functions.yaml
        2645: - func: flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)

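native_functions.yaml is the canonical declaration: flatten.using_ints is the overload behind torch.flatten(input, start_dim, end_dim), and the Tensor(a) annotations say the output may alias the input. A short demonstration using only public PyTorch:

    import torch

    t = torch.randn(2, 3, 4, 5)

    # Defaults (start_dim=0, end_dim=-1) flatten everything.
    assert torch.flatten(t).shape == (120,)

    # start_dim/end_dim collapse only dims 1..2 into one.
    assert torch.flatten(t, 1, 2).shape == (2, 12, 5)

    # The ATen overload is callable directly and behaves identically.
    assert torch.ops.aten.flatten.using_ints(t, 1, 2).shape == (2, 12, 5)

    # Tensor(a) -> Tensor(a): on a contiguous input the result is a view,
    # so writes through it show up in the original tensor.
    v = torch.flatten(t, 0, 1)
    v[0, 0, 0] = 42.0
    assert t[0, 0, 0, 0].item() == 42.0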