# mypy: allow-untyped-defs
from typing import List, Tuple, Union

import torch
from torch import Tensor

from . import _functional as F
from .optimizer import _maximize_doc, Optimizer, ParamsT


__all__ = ["SparseAdam"]


class SparseAdam(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-8,
        maximize: bool = False,
    ):
        if isinstance(lr, Tensor) and lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
        if not 0.0 < lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 < eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")

        defaults = dict(lr=lr, betas=betas, eps=eps, maximize=maximize)
        super().__init__(params, defaults)

        sparse_params = []
        complex_params = []
        for index, param_group in enumerate(self.param_groups):
            assert isinstance(
                param_group, dict
            ), f"param_groups must be a list of dicts, but got {type(param_group)}"
            # given param group, convert given params to a list first before iterating
            for d_index, d_param in enumerate(param_group["params"]):
                if d_param.is_sparse:
                    sparse_params.append([index, d_index])
                if d_param.is_complex():
                    complex_params.append([index, d_index])
        if sparse_params:
            raise ValueError(
                f"Sparse params at indices {sparse_params}: SparseAdam requires dense parameter tensors"
            )
        if complex_params:
            raise ValueError(
                f"Complex params at indices {complex_params}: SparseAdam does not support complex parameters"
            )

    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: List[Tensor] = []
            grads: List[Tensor] = []
            exp_avgs: List[Tensor] = []
            exp_avg_sqs: List[Tensor] = []
            state_steps: List[int] = []
            beta1, beta2 = group["betas"]
            maximize = group.get("maximize", False)

            for p in group["params"]:
                if p.grad is not None:
                    params_with_grad.append(p)
                    if not p.grad.is_sparse:
                        raise RuntimeError(
                            "SparseAdam does not support dense gradients, please consider Adam instead"
                        )
                    grads.append(p.grad)

                    state = self.state[p]

                    # State initialization
                    if len(state) == 0:
                        state["step"] = 0
                        # Exponential moving average of gradient values
                        state["exp_avg"] = torch.zeros_like(
                            p, memory_format=torch.preserve_format
                        )
                        # Exponential moving average of squared gradient values
                        state["exp_avg_sq"] = torch.zeros_like(
                            p, memory_format=torch.preserve_format
                        )

                    exp_avgs.append(state["exp_avg"])
                    exp_avg_sqs.append(state["exp_avg_sq"])

                    # update the steps for each param group update
                    state["step"] += 1
                    # record the step after step update
                    state_steps.append(state["step"])

            F.sparse_adam(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                state_steps,
                eps=group["eps"],
                beta1=beta1,
                beta2=beta2,
                lr=group["lr"],
                maximize=maximize,
            )

        return loss


SparseAdam.__doc__ = rf"""SparseAdam implements a masked version of the Adam algorithm
    suitable for sparse gradients. Currently, due to implementation constraints (explained
    below), SparseAdam is only intended for a narrow subset of use cases, specifically
    parameters of a dense layout with gradients of a sparse layout. This occurs in a
    special case where the module's backward pass produces grads already in a sparse layout.
    One example NN module that behaves as such is ``nn.Embedding(sparse=True)``.

    SparseAdam approximates the Adam algorithm by masking out the parameter and moment
    updates corresponding to the zero values in the gradients. Whereas the Adam algorithm
    will update the first moment, the second moment, and the parameters based on all values
    of the gradients, SparseAdam only updates the moments and parameters corresponding
    to the non-zero values of the gradients.

    A simplified way of thinking about the `intended` implementation is as follows
    (a dense-tensor sketch of these steps appears right after this list):

    1. Create a mask of the non-zero values in the sparse gradients. For example,
       if your gradient looks like [0, 5, 0, 0, 9], the mask would be [0, 1, 0, 0, 1].
    2. Apply this mask over the running moments and do computation on only the
       non-zero values.
    3. Apply this mask over the parameters and only apply an update on non-zero values.
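
    A rough, purely illustrative dense-tensor sketch of those three steps (the real
    implementation, called via ``F.sparse_adam`` in ``step()``, operates directly on
    sparse layout tensors and also applies Adam's bias correction, which this sketch
    omits)::

        mask = grad != 0                                           # step 1
        g = grad[mask]
        exp_avg[mask] = beta1 * exp_avg[mask] + (1 - beta1) * g    # step 2
        exp_avg_sq[mask] = beta2 * exp_avg_sq[mask] + (1 - beta2) * g * g
        denom = exp_avg_sq[mask].sqrt() + eps
        param[mask] = param[mask] - lr * exp_avg[mask] / denom     # step 3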

    In actuality, we use sparse layout Tensors to optimize this approximation, which means
    that the more gradient values are masked out by never being materialized, the more
    performant the optimization. Since we rely on using sparse layout tensors, we infer
    that any materialized value in the sparse layout is non-zero and we do NOT actually
    verify that all values are not zero! It is important not to conflate a semantically
    sparse tensor (a tensor where many of its values are zeros) with a sparse layout tensor
    (a tensor where ``.is_sparse`` returns ``True``). The SparseAdam approximation is
    intended for `semantically` sparse tensors, and the sparse layout is only an
    implementation detail. A clearer implementation would be to use MaskedTensors, but
    those are experimental.
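
    For instance (illustrative only), ``Tensor.to_sparse()`` drops zeros when building the
    sparse layout, but an explicitly constructed sparse tensor can still materialize zero
    values, and such entries would not be skipped::

        >>> torch.tensor([0.0, 5.0, 0.0, 0.0, 9.0]).to_sparse().coalesce().values()
        tensor([5., 9.])
        >>> i = torch.tensor([[1, 4]])
        >>> v = torch.tensor([0.0, 9.0])
        >>> torch.sparse_coo_tensor(i, v, (5,)).coalesce().values()
        tensor([0., 9.])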


    .. note::

        If you suspect your gradients are semantically sparse (but do not have sparse
        layout), this variant may not be the best for you. Ideally, you want to avoid
        materializing anything that is suspected to be sparse in the first place, since
        the cost of converting all your grads from dense layout to sparse layout may
        outweigh the performance gain. Here, using Adam may be the best alternative,
        unless you can easily rig up your module to output sparse grads in the way
        ``nn.Embedding(sparse=True)`` does. If you insist on converting your grads, you
        can do so by manually overriding your parameters' ``.grad`` fields with their
        sparse equivalents before calling ``.step()``, as sketched below.
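
        One illustrative way to do that conversion (a sketch only; ``model`` and
        ``optimizer`` are placeholder names for your module and a SparseAdam instance)::

            for p in model.parameters():
                if p.grad is not None and not p.grad.is_sparse:
                    p.grad = p.grad.to_sparse()
            optimizer.step()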


    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, Tensor, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        {_maximize_doc}

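    Example (a minimal sketch using ``nn.Embedding(sparse=True)``, the module mentioned
    above; names and sizes are placeholders)::

        >>> emb = torch.nn.Embedding(10, 3, sparse=True)
        >>> optimizer = torch.optim.SparseAdam(emb.parameters(), lr=1e-3)
        >>> input = torch.tensor([[1, 2, 4, 5]])
        >>> loss = emb(input).sum()
        >>> optimizer.zero_grad()
        >>> loss.backward()
        >>> optimizer.step()
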
    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980

    """