# mypy: allow-untyped-defs
from numbers import Number

import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import (
    broadcast_all,
    lazy_property,
    logits_to_probs,
    probs_to_logits,
)
from torch.nn.functional import binary_cross_entropy_with_logits


__all__ = ["Geometric"]


class Geometric(Distribution):
20r"""
21Creates a Geometric distribution parameterized by :attr:`probs`,
22where :attr:`probs` is the probability of success of Bernoulli trials.
23
24.. math::
25
26P(X=k) = (1-p)^{k} p, k = 0, 1, ...
27
28.. note::
29:func:`torch.distributions.geometric.Geometric` :math:`(k+1)`-th trial is the first success
30hence draws samples in :math:`\{0, 1, \ldots\}`, whereas
31:func:`torch.Tensor.geometric_` `k`-th trial is the first success hence draws samples in :math:`\{1, 2, \ldots\}`.
32
33Example::
34
35>>> # xdoctest: +IGNORE_WANT("non-deterministic")
36>>> m = Geometric(torch.tensor([0.3]))
37>>> m.sample() # underlying Bernoulli has 30% chance 1; 70% chance 0
38tensor([ 2.])
39
40Args:
41probs (Number, Tensor): the probability of sampling `1`. Must be in range (0, 1]
42logits (Number, Tensor): the log-odds of sampling `1`.
43"""
    arg_constraints = {"probs": constraints.unit_interval, "logits": constraints.real}
    support = constraints.nonnegative_integer

    def __init__(self, probs=None, logits=None, validate_args=None):
        if (probs is None) == (logits is None):
            raise ValueError(
                "Either `probs` or `logits` must be specified, but not both."
            )
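        # `broadcast_all` promotes plain Python numbers to tensors, so `self.probs`
        # (or `self.logits`) is always a Tensor after this point.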
        if probs is not None:
            (self.probs,) = broadcast_all(probs)
        else:
            (self.logits,) = broadcast_all(logits)
        probs_or_logits = probs if probs is not None else logits
        if isinstance(probs_or_logits, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = probs_or_logits.size()
        super().__init__(batch_shape, validate_args=validate_args)
        if self._validate_args and probs is not None:
            # Add an extra check beyond unit_interval: the unit interval admits
            # probs == 0, but a Geometric distribution requires probs > 0.
            value = self.probs
            valid = value > 0
            if not valid.all():
                invalid_value = value.data[~valid]
                raise ValueError(
                    "Expected parameter probs "
                    f"({type(value).__name__} of shape {tuple(value.shape)}) "
                    f"of distribution {repr(self)} "
                    f"to be positive but found invalid values:\n{invalid_value}"
                )

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Geometric, _instance)
        batch_shape = torch.Size(batch_shape)
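        # Only expand the parameterization that was actually materialized; an
        # attribute missing from __dict__ remains lazily derived from the other.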
        if "probs" in self.__dict__:
            new.probs = self.probs.expand(batch_shape)
        if "logits" in self.__dict__:
            new.logits = self.logits.expand(batch_shape)
        super(Geometric, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    @property
    def mean(self):
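        # Mean number of failures before the first success:
        # E[X] = (1 - p) / p, written here as 1/p - 1.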
        return 1.0 / self.probs - 1.0

    @property
    def mode(self):
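        # The pmf (1 - p)^k * p is maximized at k = 0 for every valid p,
        # so the mode is always zero.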
        return torch.zeros_like(self.probs)

    @property
    def variance(self):
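        # Var[X] = (1 - p) / p**2, computed as ((1/p) - 1) / p.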
        return (1.0 / self.probs - 1.0) / self.probs

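    # Exactly one of `probs` / `logits` is set in __init__; the lazy properties
    # below compute the missing one on first access and cache it.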
    @lazy_property
    def logits(self):
        return probs_to_logits(self.probs, is_binary=True)

    @lazy_property
    def probs(self):
        return logits_to_probs(self.logits, is_binary=True)

    def sample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        tiny = torch.finfo(self.probs.dtype).tiny
        with torch.no_grad():
            if torch._C._get_tracing_state():
                # [JIT WORKAROUND] lack of support for .uniform_()
                u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device)
                u = u.clamp(min=tiny)
            else:
                u = self.probs.new(shape).uniform_(tiny, 1)
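            # Inverse-CDF sampling: for U ~ Uniform(0, 1], floor(log(U) / log(1 - p))
            # is Geometric(p) on {0, 1, ...}. `u` is clamped away from 0 above so that
            # log(u) stays finite.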
            return (u.log() / (-self.probs).log1p()).floor()

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        value, probs = broadcast_all(value, self.probs)
        probs = probs.clone(memory_format=torch.contiguous_format)
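        # Edge case: when probs == 1 and value == 0, the exact log-probability is
        # log(1) = 0, but value * log1p(-probs) would evaluate to 0 * (-inf) = nan,
        # so zero out probs there to make the first term 0.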
        probs[(probs == 1) & (value == 0)] = 0
        return value * (-probs).log1p() + self.probs.log()

    def entropy(self):
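        # Closed form: H = [-(1 - p) * log(1 - p) - p * log(p)] / p, i.e. the
        # binary cross-entropy of the Bernoulli parameter divided by p.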
        return (
            binary_cross_entropy_with_logits(self.logits, self.probs, reduction="none")
            / self.probs
        )