# mypy: allow-untyped-defs
from torch.distributions import constraints
from torch.distributions.exponential import Exponential
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import AffineTransform, ExpTransform
from torch.distributions.utils import broadcast_all


__all__ = ["Pareto"]


class Pareto(TransformedDistribution):
    r"""
    Samples from a Pareto Type 1 distribution.

    Example::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = Pareto(torch.tensor([1.0]), torch.tensor([1.0]))
        >>> m.sample()  # sample from a Pareto distribution with scale=1 and alpha=1
        tensor([ 1.5623])

    Args:
        scale (float or Tensor): Scale parameter of the distribution
        alpha (float or Tensor): Shape parameter of the distribution
    """
    arg_constraints = {"alpha": constraints.positive, "scale": constraints.positive}

    def __init__(self, scale, alpha, validate_args=None):
        self.scale, self.alpha = broadcast_all(scale, alpha)
        base_dist = Exponential(self.alpha, validate_args=validate_args)
        transforms = [ExpTransform(), AffineTransform(loc=0, scale=self.scale)]
        super().__init__(base_dist, transforms, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Pareto, _instance)
        new.scale = self.scale.expand(batch_shape)
        new.alpha = self.alpha.expand(batch_shape)
        return super().expand(batch_shape, _instance=new)

    @property
    def mean(self):
        # mean is inf for alpha <= 1
        a = self.alpha.clamp(min=1)
        return a * self.scale / (a - 1)

    @property
    def mode(self):
        return self.scale

    @property
    def variance(self):
        # var is inf for alpha <= 2
        a = self.alpha.clamp(min=2)
        return self.scale.pow(2) * a / ((a - 1).pow(2) * (a - 2))

    @constraints.dependent_property(is_discrete=False, event_dim=0)
    def support(self):
        return constraints.greater_than_eq(self.scale)

    def entropy(self):
        return (self.scale / self.alpha).log() + (1 + self.alpha.reciprocal())
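

# Illustrative sketch, not part of the module's public API: the constructor
# above realizes X = scale * exp(E) with E ~ Exponential(rate=alpha), the
# standard change-of-variables route to a Pareto Type 1 sample. The helper
# name `_demo_transform` is a hypothetical choice made for this sketch; it
# cross-checks `log_prob` against the closed-form log-density
# log f(x) = log(alpha) + alpha * log(scale) - (alpha + 1) * log(x), x >= scale.
def _demo_transform():
    import torch

    scale, alpha = torch.tensor(2.0), torch.tensor(3.0)
    d = Pareto(scale, alpha)
    x = d.sample((5,))
    # Closed-form Pareto Type 1 log-density, valid on the support x >= scale.
    closed_form = alpha.log() + alpha * scale.log() - (alpha + 1) * x.log()
    assert torch.allclose(d.log_prob(x), closed_form, atol=1e-5)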
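

# A loose Monte Carlo cross-check of the closed-form moments implemented by
# `mean` and `variance` (finite only for alpha > 1 and alpha > 2, respectively)
# and of `entropy`, which computes log(scale / alpha) + 1 + 1 / alpha. The
# helper name `_demo_moments` and the sampling tolerances below are assumptions
# made for this sketch, not library guarantees.
def _demo_moments():
    import torch

    d = Pareto(torch.tensor(1.0), torch.tensor(5.0))
    xs = d.sample((200_000,))
    # Empirical moments should land near the analytic ones for alpha = 5.
    assert torch.allclose(xs.mean(), d.mean, rtol=0.05)
    assert torch.allclose(xs.var(), d.variance, rtol=0.2)
    # entropy() should equal log(scale / alpha) + 1 + 1 / alpha exactly.
    expected_entropy = torch.tensor(1.0 / 5.0).log() + 1 + 1 / 5.0
    assert torch.allclose(d.entropy(), expected_entropy)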
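

# `expand` above broadcasts both parameters to a new batch shape so that a
# single distribution object can emit batched samples. A hypothetical
# illustration (`_demo_batching` is not part of the module):
def _demo_batching():
    import torch

    d = Pareto(torch.tensor([1.0, 2.0]), torch.tensor([2.5, 4.0]))
    assert d.batch_shape == torch.Size([2])
    e = d.expand(torch.Size([3, 2]))
    assert e.sample().shape == torch.Size([3, 2])


if __name__ == "__main__":
    _demo_transform()
    _demo_moments()
    _demo_batching()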