/aosp_15_r20/external/pytorch/torch/distributed/_tools/ |
fsdp2_mem_tracker.py
    67: all_gather_into_tensor: Callable
    453: dist.all_gather_into_tensor,
    471: @wraps(dist.all_gather_into_tensor)
    472: def all_gather_into_tensor(
    489: return self._saved_collectives.all_gather_into_tensor(
    541: dist.all_gather_into_tensor = all_gather_into_tensor
    547: dist.all_gather_into_tensor = self._saved_collectives.all_gather_into_tensor
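fsdp2_mem_tracker.py records all-gather traffic by saving the original collective, installing a wrapped version, and restoring the original afterwards (lines 453-547 above). A minimal sketch of that save/wrap/restore pattern; record_collective is a hypothetical bookkeeping hook, not part of the tracker:

    import functools

    import torch.distributed as dist

    _saved_all_gather = dist.all_gather_into_tensor  # keep the original for later restore

    @functools.wraps(dist.all_gather_into_tensor)
    def _tracked_all_gather_into_tensor(output_tensor, input_tensor, group=None, async_op=False):
        # record_collective() is a hypothetical hook standing in for the tracker's bookkeeping
        record_collective("all_gather_into_tensor", output_tensor.numel() * output_tensor.element_size())
        return _saved_all_gather(output_tensor, input_tensor, group=group, async_op=async_op)

    dist.all_gather_into_tensor = _tracked_all_gather_into_tensor  # install the wrapper
    # ... run the workload under the tracker ...
    dist.all_gather_into_tensor = _saved_all_gather  # restore the real collective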
/aosp_15_r20/external/pytorch/test/distributed/_tensor/debug/ |
test_comm_mode.py
    65: self.assertEqual(comm_counts[c10d_functional.all_gather_into_tensor], 1)
    91: self.assertEqual(comm_counts[c10d_functional.all_gather_into_tensor], 1)
    113: self.assertEqual(comm_counts[c10d_functional.all_gather_into_tensor], 1)
    136: dist.all_gather_into_tensor(all_gather_out, inp)
    188: dist.all_gather_into_tensor(all_gather_out, inp)
test_comm_mode_features.py
    278: c10d_functional.all_gather_into_tensor
    290: c10d_functional.all_gather_into_tensor
    368: c10d_functional.all_gather_into_tensor
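Both comm-mode tests count all-gathers with CommDebugMode and key the counts by the functional-collective op. A minimal sketch of that pattern, assuming an initialized default process group and that this snapshot's CommDebugMode traces the plain c10d collective the way these tests do (the CommDebugMode import path has moved between releases):

    import torch
    import torch.distributed as dist
    from torch.distributed._tensor.debug import CommDebugMode  # path varies across releases

    c10d_functional = torch.ops.c10d_functional

    def count_all_gathers(inp: torch.Tensor) -> int:
        world_size = dist.get_world_size()
        out = inp.new_empty(world_size * inp.numel())
        comm_mode = CommDebugMode()
        with comm_mode:
            dist.all_gather_into_tensor(out, inp)
        # counts are keyed by the functional-collective op, as in the assertions above
        return comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor]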
/aosp_15_r20/external/pytorch/test/distributed/_composable/fsdp/ |
test_fully_shard_overlap.py
    55: orig_all_gather_into_tensor = dist.all_gather_into_tensor
    87: dist.all_gather_into_tensor(dummy_ag_output, dummy_ag_input)
    109: dist.all_gather_into_tensor(dummy_ag_output, dummy_ag_input)
    118: dist.all_gather_into_tensor(dummy_ag_output, dummy_ag_input)
    160: orig_all_gather_into_tensor = dist.all_gather_into_tensor
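The overlap test keeps a reference to the original collective and issues a throw-away all-gather to emulate communication latency. A rough sketch of that idea; the dummy tensor size is an arbitrary placeholder:

    import torch
    import torch.distributed as dist

    orig_all_gather_into_tensor = dist.all_gather_into_tensor

    def delayed_all_gather_into_tensor(output_tensor, input_tensor, group=None, async_op=False):
        # run a throw-away all-gather first so the real one starts later,
        # mimicking a slow interconnect for the overlap measurement
        dummy_ag_input = torch.ones(1024, device=input_tensor.device, dtype=input_tensor.dtype)
        dummy_ag_output = dummy_ag_input.new_empty(dist.get_world_size(group) * dummy_ag_input.numel())
        orig_all_gather_into_tensor(dummy_ag_output, dummy_ag_input, group=group)
        return orig_all_gather_into_tensor(output_tensor, input_tensor, group=group, async_op=async_op)

    dist.all_gather_into_tensor = delayed_all_gather_into_tensor  # installed for the duration of the test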
test_fully_shard_compile.py
    156: torch.ops._c10d_functional.all_gather_into_tensor.default,
    163: torch.ops._c10d_functional.all_gather_into_tensor.default,
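The compile test looks for the functional all-gather overload among the nodes of the traced graph. A small helper in the same spirit, assuming an FX GraphModule produced by the compile stack:

    import torch
    from torch.fx import GraphModule

    def count_all_gather_nodes(gm: GraphModule) -> int:
        # count call_function nodes that target the functional all-gather overload
        return sum(
            node.op == "call_function"
            and node.target is torch.ops._c10d_functional.all_gather_into_tensor.default
            for node in gm.graph.nodes
        )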
/aosp_15_r20/external/pytorch/test/distributed/_tensor/ |
test_math_ops.py
    205: comm_mode.get_comm_counts()[funcol.all_gather_into_tensor],
    527: funcol.all_gather_into_tensor: 2,
    549: expected_bwd_comm[funcol.all_gather_into_tensor] = (
    555: funcol.all_gather_into_tensor: 1,
    588: comm_mode.get_comm_counts()[funcol.all_gather_into_tensor],
    612: self.assertEqual(comm_counts[funcol.all_gather_into_tensor], 1)
test_redistribute.py
    54: comm_mode.get_comm_counts()[funcol.all_gather_into_tensor], 1
    161: comm_mode.get_comm_counts()[funcol.all_gather_into_tensor], 1
    378: comm_mode.get_comm_counts()[funcol.all_gather_into_tensor],
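test_redistribute.py asserts that moving a DTensor from Shard to Replicate costs exactly one all-gather. A minimal sketch of that check, assuming an initialized process group and a CPU mesh for illustration (the tests use GPU meshes):

    import torch
    import torch.distributed as dist
    from torch.distributed._tensor import DeviceMesh, Replicate, Shard, distribute_tensor
    from torch.distributed._tensor.debug import CommDebugMode

    c10d_functional = torch.ops.c10d_functional

    mesh = DeviceMesh("cpu", list(range(dist.get_world_size())))
    dtensor = distribute_tensor(torch.randn(8, 8), mesh, [Shard(0)])

    comm_mode = CommDebugMode()
    with comm_mode:
        replicated = dtensor.redistribute(mesh, [Replicate()])
    # Shard(0) -> Replicate() needs exactly one all-gather
    assert comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor] == 1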
test_utils.py
    499: comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 1
    554: comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 0
/aosp_15_r20/external/pytorch/torch/_inductor/ |
comms.py
    352: torch.ops._c10d_functional.all_gather_into_tensor
    397: torch.ops._c10d_functional.all_gather_into_tensor.default,
    430: all_gather_into_tensor = (
    435: return all_gather_into_tensor
/aosp_15_r20/external/pytorch/torch/csrc/distributed/c10d/ |
Functional.cpp
    187: at::Tensor all_gather_into_tensor(
    345: ::all_gather_into_tensor), (in TORCH_LIBRARY())
    489: .typed<decltype(all_gather_into_tensor)>() (in backward())
    529: .typed<decltype(all_gather_into_tensor)>() (in forward())
/aosp_15_r20/external/pytorch/test/distributed/tensor/parallel/ |
test_tp_style.py
    72: comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 1
    143: comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 1
    190: comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 1
test_tp_examples.py
    49: c10d_functional.all_gather_into_tensor,
    129: comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 2
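The tensor-parallel style tests count one all-gather when a column-sharded output is replicated. A sketch of that setup under CommDebugMode; it assumes an initialized process group, the GPU device used by these tests, and simplifies the model to a single linear layer:

    import torch
    import torch.distributed as dist
    import torch.nn as nn
    from torch.distributed._tensor import Replicate
    from torch.distributed._tensor.debug import CommDebugMode
    from torch.distributed.device_mesh import init_device_mesh
    from torch.distributed.tensor.parallel import ColwiseParallel, parallelize_module

    c10d_functional = torch.ops.c10d_functional

    device_type = "cuda"  # the TP style tests run on GPU
    mesh = init_device_mesh(device_type, (dist.get_world_size(),))
    model = parallelize_module(
        nn.Linear(16, 16, device=device_type),
        mesh,
        ColwiseParallel(output_layouts=Replicate()),
    )

    comm_mode = CommDebugMode()
    with comm_mode:
        model(torch.randn(4, 16, device=device_type))
    # replicating the column-sharded output requires one all-gather
    assert comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor] == 1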
/aosp_15_r20/external/pytorch/torch/distributed/tensor/ |
_shards_wrapper.py
    87: … torch.ops._c10d_functional.all_gather_into_tensor.default: cls.handle_all_gather_into_tensor,
    113: return torch.ops._c10d_functional.all_gather_into_tensor.default(
/aosp_15_r20/external/pytorch/torch/distributed/fsdp/ |
_exec_order_utils.py
    206: dist.all_gather_into_tensor(
    238: dist.all_gather_into_tensor(
/aosp_15_r20/external/pytorch/torch/_inductor/fx_passes/ |
micro_pipeline_tp.py
    89: c10d.all_gather_into_tensor.default,
    180: assert ag_node.target == c10d.all_gather_into_tensor.default
    778: torch.ops._c10d_functional.all_gather_into_tensor.default,
/aosp_15_r20/external/pytorch/torch/distributed/ |
_functional_collectives.py
    204: tensor = torch.ops._c10d_functional.all_gather_into_tensor(
    237: tensor = torch.ops._c10d_functional_autograd.all_gather_into_tensor(
    1133: all_gather_into_tensor as legacy_allgather,
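_functional_collectives.py wraps the dispatcher op behind the public functional API (all_gather_tensor), which returns a lazily synchronized tensor in eager mode. A minimal sketch of calling it, assuming the default group is initialized:

    import torch
    import torch.distributed as dist
    import torch.distributed._functional_collectives as funcol

    def gather_shards(shard: torch.Tensor) -> torch.Tensor:
        # returns an AsyncCollectiveTensor in eager mode; it synchronizes lazily
        out = funcol.all_gather_tensor(shard, gather_dim=0, group=dist.group.WORLD)
        return funcol.wait_tensor(out)  # force completion explicitly for the example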
_functional_collectives_impl.py
    43: return torch.ops._c10d_functional.all_gather_into_tensor(
distributed_c10d.py
    2293: elif op0 == all_gather_into_tensor:
    3350: def all_gather_into_tensor(output_tensor, input_tensor, group=None, async_op=False):
    3428: coll = _CollOp(all_gather_into_tensor, input_tensor, output_tensor)
    3470: return all_gather_into_tensor(output_tensor, input_tensor, group, async_op)
    4979: all_gather_into_tensor,
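distributed_c10d.py defines the process-group entry point shown at line 3350. Basic blocking usage, assuming the default group is already initialized; the output tensor must hold world_size times the input's elements:

    import torch
    import torch.distributed as dist

    def gather_rank_values(value: float) -> torch.Tensor:
        world_size = dist.get_world_size()
        inp = torch.tensor([value], dtype=torch.float32)
        out = torch.empty(world_size, dtype=torch.float32)
        dist.all_gather_into_tensor(out, inp)  # group=None uses the default group; async_op=False blocks
        return out  # out[i] holds the value contributed by rank i

With async_op=True the call instead returns a work handle whose wait() must be called before reading the output.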
/aosp_15_r20/external/pytorch/test/distributed/fsdp/ |
test_fsdp_tp_integration.py
    205: dist.all_gather_into_tensor(
    396: self.assertEqual(comm_counts[funcol.all_gather_into_tensor], 2)
test_fsdp_overlap.py
    102: orig_all_gather = torch.distributed.all_gather_into_tensor
/aosp_15_r20/external/pytorch/torch/distributed/tensor/debug/ |
_comm_mode.py
    32: funcol_native.all_gather_into_tensor: funcol_py.all_gather_into_tensor,
/aosp_15_r20/external/pytorch/torch/testing/_internal/ |
common_fsdp.py
    953: orig_all_gather = dist.all_gather_into_tensor
    955: dist.all_gather_into_tensor = new_all_gather_into_tensor
    960: dist.all_gather_into_tensor = orig_all_gather
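common_fsdp.py swaps in a replacement collective for a test and then restores the original. Packaging the same pattern as a context manager keeps the restore on the error path too; a small sketch:

    import contextlib

    import torch.distributed as dist

    @contextlib.contextmanager
    def patched_all_gather(new_all_gather_into_tensor):
        # temporarily replace the collective, restoring it even if the body raises
        orig_all_gather = dist.all_gather_into_tensor
        dist.all_gather_into_tensor = new_all_gather_into_tensor
        try:
            yield
        finally:
            dist.all_gather_into_tensor = orig_all_gather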
/aosp_15_r20/external/pytorch/test/distributed/ |
test_inductor_collectives.py
    347: ag = torch.ops.c10d_functional.all_gather_into_tensor(
    673: torch.distributed.all_gather_into_tensor(
    722: torch.distributed.all_gather_into_tensor(
test_c10d_functional_native.py
    183: output = torch.ops._c10d_functional.all_gather_into_tensor(
    228: ag0 = torch.ops._c10d_functional.all_gather_into_tensor.default(
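The native-functional tests call the dispatcher op directly and then wait on the result. A sketch of that call; the (input, group_size, group_name) argument order and the group_name attribute are assumptions based on this snapshot:

    import torch
    import torch.distributed as dist

    def native_all_gather(inp: torch.Tensor) -> torch.Tensor:
        group = dist.group.WORLD
        out = torch.ops._c10d_functional.all_gather_into_tensor(
            inp, group.size(), group.group_name  # (input, group_size, group_name), assumed
        )
        return torch.ops._c10d_functional.wait_tensor(out)  # functional collectives are waited on explicitly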
/aosp_15_r20/external/pytorch/torch/distributed/_symmetric_memory/ |
__init__.py
    354: A = torch.ops._c10d_functional.all_gather_into_tensor(
    412: A = torch.ops._c10d_functional.all_gather_into_tensor(
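The symmetric-memory code gathers the sharded A operand before a matmul; the unfused version of that computation looks roughly like the sketch below, written against the public functional wrapper for simplicity (the actual kernels fuse the two steps):

    import torch
    import torch.distributed as dist
    import torch.distributed._functional_collectives as funcol

    def all_gather_matmul_reference(a_shard: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        # gather the row-sharded A across ranks, then run the plain matmul
        a = funcol.all_gather_tensor(a_shard, gather_dim=0, group=dist.group.WORLD)
        return a @ b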