
Commit a633a95 (parent: ce9310d)

changed names of gradient member functions

6 files changed: 50 additions, 78 deletions
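
At a glance, the renames applied throughout the diff below are:

    grad_logp_of_set (Node.py, utils)        -> logp_gradient_of_set
    Stochastic.gradient                      -> Stochastic.logp_gradient_contribution
    grad_logp (Potential/Deterministic/Stochastic methods) -> logp_partial_gradient
    grad_logps / grad_logp keyword arguments -> logp_partial_gradients
    StepMethod.grad_logp property            -> StepMethod.logp_gradient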

pymc/Node.py (4 additions, 6 deletions)

@@ -28,15 +28,13 @@ def logp_of_set(s):
     else:
         raise exc[0], exc[1], exc[2]
 
-def grad_logp_of_set(variable_set, calculation_set = None):
+def logp_gradient_of_set(variable_set, calculation_set = None):
 
-    grad_logp = {}
+    logp_gradient = {}
     for var in variable_set:
-        gradient = var.gradient(calculation_set)
-
-        grad_logp[var] = gradient
+        logp_gradient[var] = var.logp_gradient_contribution(calculation_set)
 
-    return grad_logp
+    return logp_gradient
 
 
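
The renamed helper simply maps each variable in the set to its logp_gradient_contribution. A hedged usage sketch follows; the Normal variables are illustrative stand-ins, not part of this commit:

import pymc
from pymc import utils

x = pymc.Normal('x', mu=0.0, tau=1.0)
y = pymc.Normal('y', mu=x, tau=1.0)

variables = set([x, y])
# dict mapping each stochastic to the gradient of the joint log posterior
# with respect to it, with the calculation restricted to calculation_set
gradients = utils.logp_gradient_of_set(variables, calculation_set=variables)
for var, grad in gradients.iteritems():
    print var.__name__, grad    # each gradient is shaped like var.value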

pymc/PyMCObjects.py (25 additions, 52 deletions)

@@ -235,13 +235,13 @@ def A(x = B, y = C):
 
     :SeeAlso: Stochastic, Node, LazyFunction, stoch, dtrm, data, Model, Container
     """
-    def __init__(self, logp, doc, name, parents, cache_depth=2, plot=None, verbose=None, grad_logps = {}):
+    def __init__(self, logp, doc, name, parents, cache_depth=2, plot=None, verbose=None, logp_partial_gradients = {}):
 
         self.ParentDict = ParentDict
 
         # This function gets used to evaluate self's value.
         self._logp_fun = logp
-        self._grad_logp_functions = grad_logps
+        self._logp_partial_gradients_functions = logp_partial_gradients
 
 
         self.errmsg = "Potential %s forbids its parents' current values"%name

@@ -271,14 +271,14 @@ def gen_lazy_function(self):
                                  cache_depth = self._cache_depth)
         self._logp.force_compute()
 
-        self._grad_logps = {}
-        for parameter, function in self._grad_logp_functions.iteritems():
-            lazy_grad_logp = LazyFunction(fun = function,
+        self._logp_partial_gradients= {}
+        for parameter, function in self._logp_partial_gradients_functions.iteritems():
+            lazy_logp_partial_gradients = LazyFunction(fun = function,
                                           arguments = self.parents,
                                           ultimate_args = self.extended_parents,
                                           cache_depth = self._cache_depth)
-            lazy_grad_logp.force_compute()
-            self._grad_logps[parameter] = lazy_grad_logp
+            lazy_logp_partial_gradients.force_compute()
+            self._logp_partial_gradients[parameter] = lazy_logp_partial_gradients
 
     def get_logp(self):
         if self.verbose > 1:

@@ -309,7 +309,7 @@ def set_logp(self,value):
 
     logp = property(fget = get_logp, fset=set_logp, doc="Self's log-probability value conditional on parents.")
 
-    def grad_logp(self, calculation_set = None):
+    def logp_partial_gradient(self, calculation_set = None):
         gradient = 0
         if self in calculation_set:
 
@@ -320,37 +320,14 @@ def grad_logp(self, calculation_set = None):
 
             if value is variable:
                 try :
-                    grad_func = self._grad_logps[parameter]
+                    grad_func = self._logp_partial_gradients[parameter]
                 except KeyError:
                     raise NotImplementedError(repr(self) + " has no gradient function for parameter " + parameter)
 
                 gradient = gradient + grad_func.get()
 
-        #_grad_logp can return either raveled or non-raveled values, but they should be consistent
         return np.reshape(gradient, np.shape(variable.value))
 
-
-
-def parameter_value_dict(p, parameters):
-    for parameter, value in parameters.iteritems():
-        if isinstance(value, Variable):
-            p[parameter] = value.value
-        else:
-            p[parameter] = value
-    return p
-# should be able to be replaced in python 3.0 by
-#def params(parameters):
-#    return dict( parameter : get_val(value) for parameter, value in parameters.iteritems())
-
-#def get_val(value):
-#    if isinstance(value, Variable):
-#        return value.value
-#    else:
-#        return value
-
-
-
 class Deterministic(DeterministicBase):
     """
     A variable whose value is determined by the values of its parents.

@@ -460,23 +437,21 @@ def set_value(self,value):
 
     value = property(fget = get_value, fset=set_value, doc="Self's value computed from current values of parents.")
 
-    def grad_logp(self, variable, calculation_set = None):
+    def logp_partial_gradient(self, variable, calculation_set = None):
         """
         gets the logp gradient of this deterministic with respect to variable
         """
         if self.verbose > 0:
-            print '\t' + self.__name__ + ': grad_logp accessed.'
+            print '\t' + self.__name__ + ': logp_partial_gradient accessed.'
 
         if not (variable.dtype in float_dtypes and self.dtype in float_dtypes):
             return zeros(shape(variable.value))
 
         #gradient = 0
 
         # loop through all the parameters and add up all the gradients of log p with respect to the approrpiate variable
-        gradient = __builtin__.sum([child.grad_logp(self, calculation_set) for child in self.children ])
-        #for child in self.children:
-        #    gradient = gradient + child.grad_logp(self, calculation_set)
-
+        gradient = __builtin__.sum([child.logp_partial_gradient(self, calculation_set) for child in self.children ])
+
         totalGradient = 0
         for parameter, value in self.parents.iteritems():
             if value is variable:

@@ -654,7 +629,7 @@ def __init__( self,
                   verbose = None,
                   isdata=None,
                   check_logp=True,
-                  grad_logps = {}):
+                  logp_partial_gradients = {}):
 
         self.counter = Counter()
         self.ParentDict = ParentDict

@@ -673,7 +648,7 @@ def __init__( self,
         self._logp_fun = logp
 
         #This function will be used to evaluate self's gradient of log probability.
-        self._grad_logp_functions = grad_logps
+        self._logp_partial_gradient_functions = logp_partial_gradients
 
         # This function will be used to draw values for self conditional on self's parents.
         self._random = random

@@ -753,14 +728,14 @@ def gen_lazy_function(self):
         self._logp.force_compute()
 
 
-        self._grad_logps = {}
-        for parameter, function in self._grad_logp_functions.iteritems():
-            lazy_grad_logp = LazyFunction(fun = function,
+        self._logp_partial_gradients = {}
+        for parameter, function in self._logp_partial_gradient_functions.iteritems():
+            lazy_logp_partial_gradient = LazyFunction(fun = function,
                                           arguments = arguments,
                                           ultimate_args = self.extended_parents | set([self]),
                                           cache_depth = self._cache_depth)
-            lazy_grad_logp.force_compute()
-            self._grad_logps[parameter] = lazy_grad_logp
+            lazy_logp_partial_gradient.force_compute()
+            self._logp_partial_gradients[parameter] = lazy_logp_partial_gradient
 
     def get_value(self):
         # Define value attribute

@@ -844,19 +819,17 @@ def set_logp(self, new_logp):
 
 
 
-    def gradient(self, calculation_set = None):
+    def logp_gradient_contribution(self, calculation_set = None):
         """
         Calculates the gradient of the joint log posterior with respect to self.
         Calculation of the log posterior is restricted to the variables in calculation_set.
         """
         #NEED some sort of check to see if the log p calculation has recently failed, in which case not to continue
 
-        gradient = self.grad_logp(self, calculation_set) + __builtin__.sum([child.grad_logp(self, calculation_set) for child in self.children] )
+        return self.logp_partial_gradient(self, calculation_set) + __builtin__.sum([child.logp_partial_gradient(self, calculation_set) for child in self.children] )
 
-
-        return gradient
 
-    def grad_logp(self, variable, calculation_set = None):
+    def logp_partial_gradient(self, variable, calculation_set = None):
         """
         Calculates the partial gradient of the posterior of self with respect to variable.
         Returns zero if self is not in calculation_set.

@@ -869,7 +842,7 @@ def grad_logp(self, variable, calculation_set = None):
 
         if variable is self:
             try :
-                gradient_func = self._grad_logps['value']
+                gradient_func = self._logp_partial_gradients['value']
 
             except KeyError:
                 raise NotImplementedError(repr(self) + " has no gradient function for 'value'")

@@ -885,7 +858,7 @@ def grad_logp(self, variable, calculation_set = None):
     def _pgradient(self, variable, parameter, value):
         if value is variable:
             try :
-                return np.reshape(self._grad_logps[parameter].get(), np.shape(variable.value))
+                return np.reshape(self._logp_partial_gradients[parameter].get(), np.shape(variable.value))
             except KeyError:
                 raise NotImplementedError(repr(self) + " has no gradient function for parameter " + parameter)
         else:
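
Read together, the renamed methods split the gradient into per-factor pieces: a Stochastic's logp_gradient_contribution is its own logp_partial_gradient with respect to its value plus the logp_partial_gradient of every child that depends on it. A minimal sketch of that relationship (illustrative only, not code from this commit):

import __builtin__

def gradient_contribution_sketch(s, calculation_set=None):
    # own term: d log p(s | parents) / d s.value
    own_term = s.logp_partial_gradient(s, calculation_set)
    # child terms: d log p(child | ..., s, ...) / d s.value for each dependent factor
    child_terms = __builtin__.sum([child.logp_partial_gradient(s, calculation_set)
                                   for child in s.children])
    return own_term + child_terms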

pymc/StepMethods.py (4 additions, 4 deletions)

@@ -1,7 +1,7 @@
 from __future__ import division
 
 import numpy as np
-from utils import msqrt, check_type, round_array, float_dtypes, integer_dtypes, bool_dtypes, safe_len, find_generations, logp_of_set, symmetrize, grad_logp_of_set
+from utils import msqrt, check_type, round_array, float_dtypes, integer_dtypes, bool_dtypes, safe_len, find_generations, logp_of_set, symmetrize, logp_gradient_of_set
 from numpy import ones, zeros, log, shape, cov, ndarray, inner, reshape, sqrt, any, array, all, abs, exp, where, isscalar, iterable, multiply, transpose, tri
 from numpy.linalg.linalg import LinAlgError
 from numpy.linalg import pinv, cholesky

@@ -280,10 +280,10 @@ def _get_logp_plus_loglike(self):
     # Make get property for retrieving log-probability
     logp_plus_loglike = property(fget = _get_logp_plus_loglike, doc="The summed log-probability of all stochastic variables that depend on \n self.stochastics, and self.stochastics.")
 
-    def _get_grad_logp(self):
-        return grad_logp_of_set(self.stochastics, self.markov_blanket)
+    def _get_logp_gradient(self):
+        return logp_gradient_of_set(self.stochastics, self.markov_blanket)
 
-    grad_logp = property(fget = _get_grad_logp)
+    logp_gradient = property(fget = _get_logp_gradient)
 
     def current_state(self):
         """Return a dictionary with the current value of the variables defining

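Step methods now reach the aggregated gradients through the logp_gradient property. A hedged sketch of how a gradient-informed step method might use it; the GradientStep class and its step-size handling are hypothetical, only self.logp_gradient comes from the diff above:

from pymc.StepMethods import StepMethod

class GradientStep(StepMethod):
    def propose(self, step_size=0.001):
        # {stochastic: gradient of the joint log posterior w.r.t. its value}
        gradients = self.logp_gradient
        for stochastic, grad in gradients.iteritems():
            stochastic.value = stochastic.value + step_size * grad
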
pymc/distributions.py (14 additions, 13 deletions)

@@ -117,11 +117,11 @@ def new_dist_class(*new_class_args):
     stochastic_from_dist
     """
 
-    (dtype, name, parent_names, parents_default, docstr, logp, random, mv, grad_logps) = new_class_args
+    (dtype, name, parent_names, parents_default, docstr, logp, random, mv, logp_partial_gradients) = new_class_args
     class new_class(Stochastic):
         __doc__ = docstr
         def __init__(self, *args, **kwds):
-            (dtype, name, parent_names, parents_default, docstr, logp, random, mv, grad_logps) = new_class_args
+            (dtype, name, parent_names, parents_default, docstr, logp, random, mv, logp_partial_gradients) = new_class_args
             parents=parents_default
 
             # Figure out what argument names are needed.

@@ -241,15 +241,15 @@ def shape_error():
                 logp = debug_wrapper(logp)
                 random = debug_wrapper(random)
             else:
-                Stochastic.__init__(self, logp=logp, random=random, grad_logps = grad_logps, dtype=dtype, **arg_dict_out)
+                Stochastic.__init__(self, logp=logp, random=random, logp_partial_gradients = logp_partial_gradients, dtype=dtype, **arg_dict_out)
 
     new_class.__name__ = name
     new_class.parent_names = parent_names
 
     return new_class
 
 
-def stochastic_from_dist(name, logp, random=None, grad_logp={}, dtype=np.float, mv=False):
+def stochastic_from_dist(name, logp, random=None, logp_partial_gradients={}, dtype=np.float, mv=False):
     """
     Return a Stochastic subclass made from a particular distribution.

@@ -303,12 +303,13 @@ def stochastic_from_dist(name, logp, random=None, grad_logp={}, dtype=np.float,
     logp=valuewrapper(logp)
     distribution_arguments = logp.__dict__
 
-    wrapped_grad_logps = {}
+    wrapped_logp_partial_gradients = {}
 
-    for parameter, func in grad_logp.iteritems():
-        wrapped_grad_logps[parameter] = valuewrapper(grad_logp[parameter], arguments = distribution_arguments)
+    print logp_partial_gradients
+    for parameter, func in logp_partial_gradients.iteritems():
+        wrapped_logp_partial_gradients[parameter] = valuewrapper(logp_partial_gradients[parameter], arguments = distribution_arguments)
 
-    return new_dist_class(dtype, name, parent_names, parents_default, docstr, logp, random, mv, wrapped_grad_logps)
+    return new_dist_class(dtype, name, parent_names, parents_default, docstr, logp, random, mv, wrapped_logp_partial_gradients)
 
 
 #-------------------------------------------------------------

@@ -2695,23 +2696,23 @@ def local_decorated_likelihoods(obj):
 
 for dist in sc_continuous_distributions:
     dist_logp, dist_random, grad_logp = name_to_funcs(dist, locals())
-    locals()[capitalize(dist)]= stochastic_from_dist(dist, dist_logp, dist_random, grad_logp)
+    locals()[capitalize(dist)]= stochastic_from_dist(dist, dist_logp, dist_random, logp_partial_gradients = grad_logp)
 
 for dist in mv_continuous_distributions:
     dist_logp, dist_random, grad_logp = name_to_funcs(dist, locals())
-    locals()[capitalize(dist)]= stochastic_from_dist(dist, dist_logp, dist_random, grad_logp = grad_logp, mv=True)
+    locals()[capitalize(dist)]= stochastic_from_dist(dist, dist_logp, dist_random, logp_partial_gradients = grad_logp, mv=True)
 
 for dist in sc_discrete_distributions:
     dist_logp, dist_random, grad_logp = name_to_funcs(dist, locals())
-    locals()[capitalize(dist)]= stochastic_from_dist(dist, dist_logp, dist_random, grad_logp = grad_logp, dtype=np.int)
+    locals()[capitalize(dist)]= stochastic_from_dist(dist, dist_logp, dist_random, logp_partial_gradients = grad_logp, dtype=np.int)
 
 for dist in mv_discrete_distributions:
     dist_logp, dist_random, grad_logp = name_to_funcs(dist, locals())
-    locals()[capitalize(dist)]= stochastic_from_dist(dist, dist_logp, dist_random, grad_logp = grad_logp, dtype=np.int, mv=True)
+    locals()[capitalize(dist)]= stochastic_from_dist(dist, dist_logp, dist_random, logp_partial_gradients = grad_logp, dtype=np.int, mv=True)
 
 
 dist_logp, dist_random, grad_logp = name_to_funcs('bernoulli', locals())
-Bernoulli = stochastic_from_dist('bernoulli', dist_logp, dist_random,grad_logp, dtype=np.bool)
+Bernoulli = stochastic_from_dist('bernoulli', dist_logp, dist_random,logp_partial_gradients = grad_logp, dtype=np.bool)
 
 
 def uninformative_like(x):
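
With the rename, gradient functions reach stochastic_from_dist through the logp_partial_gradients keyword: a dict keyed by parameter name, with 'value' standing for the variable itself. A hedged sketch using hand-written gradients for the normal distribution (the grad_* helpers are illustrative stand-ins, not functions added by this commit):

import numpy as np
from pymc.distributions import stochastic_from_dist, normal_like, rnormal

def grad_normal_x(x, mu, tau):
    # d/dx log N(x; mu, 1/tau)
    return -tau * (x - mu)

def grad_normal_mu(x, mu, tau):
    # d/dmu log N(x; mu, 1/tau)
    return tau * (x - mu)

Normal = stochastic_from_dist('normal', normal_like, rnormal,
                              logp_partial_gradients={'value': grad_normal_x,
                                                      'mu': grad_normal_mu},
                              dtype=np.float)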

pymc/tests/test_gradients.py (2 additions, 2 deletions)

@@ -192,7 +192,7 @@ def test_numpy_deterministics_jacobians(self):
     def check_gradients(self, stochastic):
 
         stochastics = find_variable_set(stochastic)
-        gradients = utils.grad_logp_of_set(stochastics, stochastics)
+        gradients = utils.logp_gradient_of_set(stochastics, stochastics)
 
         for s, analytic_gradient in gradients.iteritems():
 
@@ -324,7 +324,7 @@ def check_model_gradients(self, model):
         markov_blanket = list(model)+list(children)
 
 
-        gradients = utils.grad_logp_of_set(model)
+        gradients = utils.logp_gradient_of_set(model)
         for variable in model:
 
             analytic_gradient = gradients[variable]
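
The checks above compare these analytic gradients against finite differences of the summed log-probability; a simplified sketch of that comparison (the numeric_gradient helper is illustrative, not part of the test suite):

import numpy as np

def numeric_gradient(variable, stochastics, eps=1e-6):
    # one-sided finite difference of sum(s.logp) with respect to variable.value
    base = sum(s.logp for s in stochastics)
    original = np.copy(variable.value)
    variable.value = original + eps
    perturbed = sum(s.logp for s in stochastics)
    variable.value = original    # restore so later checks are unaffected
    return (perturbed - base) / eps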

pymc/utils.py (1 addition, 1 deletion)

@@ -12,7 +12,7 @@
 import pdb
 from numpy.linalg.linalg import LinAlgError
 from numpy.linalg import cholesky, eigh, det, inv
-from Node import logp_of_set, grad_logp_of_set
+from Node import logp_of_set, logp_gradient_of_set
 import types
 
 from numpy import sqrt, obj2sctype, ndarray, asmatrix, array, pi, prod, exp,\
