# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Exponential distribution class."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import gamma
from tensorflow.python.util.tf_export import tf_export


__all__ = [
    "Exponential",
    "ExponentialWithSoftplusRate",
]


@tf_export("distributions.Exponential")
class Exponential(gamma.Gamma):
  """Exponential distribution.

  The Exponential distribution is parameterized by an event `rate` parameter.

  #### Mathematical Details

  The probability density function (pdf) is,

  ```none
  pdf(x; lambda, x > 0) = exp(-lambda x) / Z
  Z = 1 / lambda
  ```

  where `rate = lambda` and `Z` is the normalizing constant.

  The Exponential distribution is a special case of the Gamma distribution,
  i.e.,

  ```python
  Exponential(rate) = Gamma(concentration=1., rate)
  ```

  The Exponential distribution uses a `rate` parameter, or "inverse scale",
  which can be intuited as,

  ```none
  X ~ Exponential(rate=1)
  Y = X / rate
  ```

  """

  def __init__(self,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="Exponential"):
    """Construct Exponential distribution with parameter `rate`.

    Args:
      rate: Floating point tensor, equivalent to `1 / mean`. Must contain only
        positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
    parameters = dict(locals())
    # Even though all statistics of the Exponential distribution are defined
    # for valid inputs, this is not true in the parent class "Gamma."
    # Therefore, passing allow_nan_stats=True through to the parent class
    # results in unnecessary asserts.
    with ops.name_scope(name, values=[rate]):
      self._rate = ops.convert_to_tensor(rate, name="rate")
    super(Exponential, self).__init__(
        concentration=array_ops.ones([], dtype=self._rate.dtype),
        rate=self._rate,
        allow_nan_stats=allow_nan_stats,
        validate_args=validate_args,
        name=name)
    # While the Gamma distribution is not reparameterizable, the Exponential
    # distribution is.
    self._reparameterization_type = distribution.FULLY_REPARAMETERIZED
    self._parameters = parameters
    self._graph_parents += [self._rate]

  @staticmethod
  def _param_shapes(sample_shape):
    return {"rate": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}

  @property
  def rate(self):
    return self._rate

  def _sample_n(self, n, seed=None):
    shape = array_ops.concat([[n], array_ops.shape(self._rate)], 0)
    # Uniform variates must be sampled from the open-interval `(0, 1)` rather
    # than `[0, 1)`. To do so, we use
    # `np.finfo(self.dtype.as_numpy_dtype).tiny` because it is the smallest,
    # positive, "normal" number. A "normal" number is such that the mantissa
    # has an implicit leading 1. Normal, positive numbers x, y have the
    # reasonable property that `x + y >= max(x, y)`. In this case, a
    # subnormal number (i.e., np.nextafter) can cause us to sample 0.
    sampled = random_ops.random_uniform(
        shape,
        minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
        maxval=1.,
        seed=seed,
        dtype=self.dtype)
    # Inverse-CDF (inverse transform) sampling: if U ~ Uniform(0, 1), then
    # -log(U) / rate ~ Exponential(rate).
    return -math_ops.log(sampled) / self._rate
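

# A minimal usage sketch, added for illustration only (the `_demo_*` helper is
# hypothetical, not part of the original module). It checks the Gamma special
# case documented in the class docstring: `Exponential(rate)` and
# `Gamma(concentration=1., rate)` assign the same log-probability,
# `log(rate) - rate * x`.
def _demo_exponential_is_gamma():
  from tensorflow.python.framework import constant_op

  rate = constant_op.constant([0.5, 1., 2.])
  exponential = Exponential(rate=rate)
  standard_gamma = gamma.Gamma(concentration=1., rate=rate)
  x = constant_op.constant([[1.], [2.]])  # Broadcasts to a [2, 3] batch.
  # The two results are equal tensors, elementwise.
  return exponential.log_prob(x), standard_gamma.log_prob(x)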


class ExponentialWithSoftplusRate(Exponential):
  """Exponential with softplus transform on `rate`."""

  def __init__(self,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="ExponentialWithSoftplusRate"):
    parameters = dict(locals())
    with ops.name_scope(name, values=[rate]):
      super(ExponentialWithSoftplusRate, self).__init__(
          rate=nn.softplus(rate, name="softplus_rate"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    self._parameters = parameters
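

# A minimal sketch of why the softplus variant exists, added for illustration
# only (the helper below is hypothetical, not part of the original module):
# softplus(x) = log(1 + exp(x)) maps any real value to a positive number, so
# an unconstrained parameter, e.g. a free variable being optimized, always
# yields a valid positive `rate`.
def _demo_softplus_rate():
  from tensorflow.python.framework import constant_op

  unconstrained = constant_op.constant([-2., 0., 3.])  # Not restricted to > 0.
  dist = ExponentialWithSoftplusRate(rate=unconstrained)
  # `dist.rate` equals nn.softplus(unconstrained): approximately
  # [0.127, 0.693, 3.049], all strictly positive.
  return dist.rate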