# mypy: allow-untyped-defs
from numbers import Number

import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all
from torch.types import _size


__all__ = ["Laplace"]


class Laplace(Distribution):
    r"""
    Creates a Laplace distribution parameterized by :attr:`loc` and :attr:`scale`.

    Example::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = Laplace(torch.tensor([0.0]), torch.tensor([1.0]))
        >>> m.sample()  # Laplace distributed with loc=0, scale=1
        tensor([ 0.1046])

    Args:
        loc (float or Tensor): mean of the distribution
        scale (float or Tensor): scale of the distribution
    """
    arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
    support = constraints.real
    has_rsample = True

    @property
    def mean(self):
        return self.loc

    @property
    def mode(self):
        return self.loc

    @property
    def variance(self):
        return 2 * self.scale.pow(2)

    @property
    def stddev(self):
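        # Std[X] = sqrt(2) * scale, since Var[X] = 2 * scale**2.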
        return (2**0.5) * self.scale

    def __init__(self, loc, scale, validate_args=None):
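        # Broadcast loc and scale to a common shape; purely scalar
        # parameters yield an empty batch_shape (a single distribution).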
        self.loc, self.scale = broadcast_all(loc, scale)
        if isinstance(loc, Number) and isinstance(scale, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.loc.size()
        super().__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
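        # Build a new Laplace instance with parameters expanded to
        # `batch_shape`, carrying over this instance's validation flag.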
        new = self._get_checked_instance(Laplace, _instance)
        batch_shape = torch.Size(batch_shape)
        new.loc = self.loc.expand(batch_shape)
        new.scale = self.scale.expand(batch_shape)
        super(Laplace, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def rsample(self, sample_shape: _size = torch.Size()) -> torch.Tensor:
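        # Inverse-transform sampling: for U ~ Uniform(-1, 1),
        # loc - scale * sign(U) * log1p(-|U|) follows Laplace(loc, scale),
        # because the Laplace CDF has a closed-form inverse.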
        shape = self._extended_shape(sample_shape)
        finfo = torch.finfo(self.loc.dtype)
        if torch._C._get_tracing_state():
            # [JIT WORKAROUND] lack of support for .uniform_()
            u = torch.rand(shape, dtype=self.loc.dtype, device=self.loc.device) * 2 - 1
            return self.loc - self.scale * u.sign() * torch.log1p(
                -u.abs().clamp(min=finfo.tiny)
            )
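        # The eps - 1 lower bound keeps |u| strictly below 1, so the
        # log1p(-u.abs()) below stays finite (log1p(-1) would be -inf).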
        u = self.loc.new(shape).uniform_(finfo.eps - 1, 1)
        # TODO: If we ever implement tensor.nextafter, below is what we want ideally.
        # u = self.loc.new(shape).uniform_(self.loc.nextafter(-.5, 0), .5)
        return self.loc - self.scale * u.sign() * torch.log1p(-u.abs())

    def log_prob(self, value):
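        # log p(x) = -log(2 * scale) - |x - loc| / scale, the log of the
        # Laplace density exp(-|x - loc| / scale) / (2 * scale).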
        if self._validate_args:
            self._validate_sample(value)
        return -torch.log(2 * self.scale) - torch.abs(value - self.loc) / self.scale

    def cdf(self, value):
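        # Branch-free form of the piecewise Laplace CDF
        #   F(x) = 0.5 * exp((x - loc) / scale)        for x <= loc
        #   F(x) = 1 - 0.5 * exp(-(x - loc) / scale)   for x > loc,
        # using expm1 for accuracy when value is close to loc.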
        if self._validate_args:
            self._validate_sample(value)
        return 0.5 - 0.5 * (value - self.loc).sign() * torch.expm1(
            -(value - self.loc).abs() / self.scale
        )

    def icdf(self, value):
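        # Quantile function, the closed-form inverse of cdf():
        # F^-1(p) = loc - scale * sign(p - 0.5) * log1p(-2 * |p - 0.5|)
        # for p in (0, 1).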
        term = value - 0.5
        return self.loc - self.scale * term.sign() * torch.log1p(-2 * term.abs())

    def entropy(self):
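        # Differential entropy of Laplace(loc, scale) is 1 + log(2 * scale).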
        return 1 + torch.log(2 * self.scale)
98