xref: /aosp_15_r20/external/pytorch/torch/testing/_internal/distributed/fake_pg.py (revision da0073e96a02ea20f0ac840b70461e3646d07c45)
1# mypy: allow-untyped-defs
2
3import torch.distributed as dist
4
5from torch._C._distributed_c10d import (
6    FakeProcessGroup,
7)
8
9
class FakeStore(dist.Store):
    """
    A fake key-value store used purely to satisfy the store argument when
    initializing a fake process group; one can use either FakeStore or
    HashStore for this purpose.
    """
15
16
17def _create_fake_pg(prefix_store, rank, world_size, timeout):
18    """
19    A fake process group (not related to FakeTensor) is a process group which
20    doesn't actually do any communication, it just hallucinates some
21    communication.  You can run a single rank with a fake process group
22    without needing multiple processes (simulates per-rank behavior)
23
24    NOTE: This is not a real process group, and it would produce wrong results
25    for every collective. It should be used as a convinient tool when playing
26    with distributed but don't care about the actual data.
27    """
28    return FakeProcessGroup(rank, world_size)
29
30
# Register "fake" as a distributed backend so callers can do
# dist.init_process_group(backend="fake", ...) on either CPU or CUDA
# without performing any real communication.
dist.Backend.register_backend("fake", _create_fake_pg, devices=['cpu', 'cuda'])
32