@@ -235,13 +235,13 @@ def A(x = B, y = C):

    :SeeAlso: Stochastic, Node, LazyFunction, stoch, dtrm, data, Model, Container
    """
-    def __init__(self, logp, doc, name, parents, cache_depth = 2, plot = None, verbose = None, grad_logps = {}):
+    def __init__(self, logp, doc, name, parents, cache_depth = 2, plot = None, verbose = None, logp_partial_gradients = {}):

        self.ParentDict = ParentDict

        # This function gets used to evaluate self's value.
        self._logp_fun = logp
-        self._grad_logp_functions = grad_logps
+        self._logp_partial_gradients_functions = logp_partial_gradients


        self.errmsg = "Potential %s forbids its parents' current values" % name
@@ -271,14 +271,14 @@ def gen_lazy_function(self):
                                  cache_depth = self._cache_depth)
        self._logp.force_compute()

-        self._grad_logps = {}
-        for parameter, function in self._grad_logp_functions.iteritems():
-            lazy_grad_logp = LazyFunction(fun = function,
+        self._logp_partial_gradients = {}
+        for parameter, function in self._logp_partial_gradients_functions.iteritems():
+            lazy_logp_partial_gradients = LazyFunction(fun = function,
                                          arguments = self.parents,
                                          ultimate_args = self.extended_parents,
                                          cache_depth = self._cache_depth)
-            lazy_grad_logp.force_compute()
-            self._grad_logps[parameter] = lazy_grad_logp
+            lazy_logp_partial_gradients.force_compute()
+            self._logp_partial_gradients[parameter] = lazy_logp_partial_gradients


    def get_logp(self):
        if self.verbose > 1:
@@ -309,7 +309,7 @@ def set_logp(self,value):

    logp = property(fget = get_logp, fset = set_logp, doc = "Self's log-probability value conditional on parents.")

-    def grad_logp(self, calculation_set = None):
+    def logp_partial_gradient(self, calculation_set = None):
        gradient = 0
        if self in calculation_set:
@@ -320,37 +320,14 @@ def grad_logp(self, calculation_set = None):

            if value is variable:
                try:
-                    grad_func = self._grad_logps[parameter]
+                    grad_func = self._logp_partial_gradients[parameter]
                except KeyError:
                    raise NotImplementedError(repr(self) + " has no gradient function for parameter " + parameter)

                gradient = gradient + grad_func.get()

-        #_grad_logp can return either raveled or non-raveled values, but they should be consistent
        return np.reshape(gradient, np.shape(variable.value)) #np.reshape(gradient, np.shape(variable.value))
-
-
-def parameter_value_dict(p, parameters):
-    for parameter, value in parameters.iteritems():
-        if isinstance(value, Variable):
-            p[parameter] = value.value
-        else:
-            p[parameter] = value
-    return p
-# should be able to be replaced in python 3.0 by
-#def params(parameters):
-#    return dict( parameter : get_val(value) for parameter, value in parameters.iteritems())
-
-#def get_val(value):
-#    if isinstance(value, Variable):
-#        return value.value
-#    else:
-#        return value
-
-
-
-
class Deterministic(DeterministicBase):
    """
    A variable whose value is determined by the values of its parents.
@@ -460,23 +437,21 @@ def set_value(self,value):

    value = property(fget = get_value, fset = set_value, doc = "Self's value computed from current values of parents.")

-    def grad_logp(self, variable, calculation_set = None):
+    def logp_partial_gradient(self, variable, calculation_set = None):
        """
        gets the logp gradient of this deterministic with respect to variable
        """
        if self.verbose > 0:
-            print '\t' + self.__name__ + ': grad_logp accessed.'
+            print '\t' + self.__name__ + ': logp_partial_gradient accessed.'

        if not (variable.dtype in float_dtypes and self.dtype in float_dtypes):
            return zeros(shape(variable.value))

        #gradient = 0

        # loop through all the parameters and add up all the gradients of log p with respect to the appropriate variable
-        gradient = __builtin__.sum([child.grad_logp(self, calculation_set) for child in self.children])
-        #for child in self.children:
-        #    gradient = gradient + child.grad_logp(self, calculation_set)
-
+        gradient = __builtin__.sum([child.logp_partial_gradient(self, calculation_set) for child in self.children])
+
        totalGradient = 0
        for parameter, value in self.parents.iteritems():
            if value is variable:
@@ -654,7 +629,7 @@ def __init__( self,
                    verbose = None,
                    isdata = None,
                    check_logp = True,
-                    grad_logps = {}):
+                    logp_partial_gradients = {}):

        self.counter = Counter()
        self.ParentDict = ParentDict
@@ -673,7 +648,7 @@ def __init__( self,
        self._logp_fun = logp

        #This function will be used to evaluate self's gradient of log probability.
-        self._grad_logp_functions = grad_logps
+        self._logp_partial_gradient_functions = logp_partial_gradients

        # This function will be used to draw values for self conditional on self's parents.
        self._random = random
@@ -753,14 +728,14 @@ def gen_lazy_function(self):
        self._logp.force_compute()


-        self._grad_logps = {}
-        for parameter, function in self._grad_logp_functions.iteritems():
-            lazy_grad_logp = LazyFunction(fun = function,
+        self._logp_partial_gradients = {}
+        for parameter, function in self._logp_partial_gradient_functions.iteritems():
+            lazy_logp_partial_gradient = LazyFunction(fun = function,
                                          arguments = arguments,
                                          ultimate_args = self.extended_parents | set([self]),
                                          cache_depth = self._cache_depth)
-            lazy_grad_logp.force_compute()
-            self._grad_logps[parameter] = lazy_grad_logp
+            lazy_logp_partial_gradient.force_compute()
+            self._logp_partial_gradients[parameter] = lazy_logp_partial_gradient

    def get_value(self):
        # Define value attribute
@@ -844,19 +819,17 @@ def set_logp(self, new_logp):



-    def gradient(self, calculation_set = None):
+    def logp_gradient_contribution(self, calculation_set = None):
        """
        Calculates the gradient of the joint log posterior with respect to self.
        Calculation of the log posterior is restricted to the variables in calculation_set.
        """
        #NEED some sort of check to see if the log p calculation has recently failed, in which case not to continue

-        gradient = self.grad_logp(self, calculation_set) + __builtin__.sum([child.grad_logp(self, calculation_set) for child in self.children])
+        return self.logp_partial_gradient(self, calculation_set) + __builtin__.sum([child.logp_partial_gradient(self, calculation_set) for child in self.children])

-
-        return gradient

-    def grad_logp(self, variable, calculation_set = None):
+    def logp_partial_gradient(self, variable, calculation_set = None):
        """
        Calculates the partial gradient of the posterior of self with respect to variable.
        Returns zero if self is not in calculation_set.
@@ -869,7 +842,7 @@ def grad_logp(self, variable, calculation_set = None):

        if variable is self:
            try:
-                gradient_func = self._grad_logps['value']
+                gradient_func = self._logp_partial_gradients['value']

            except KeyError:
                raise NotImplementedError(repr(self) + " has no gradient function for 'value'")
@@ -885,7 +858,7 @@ def grad_logp(self, variable, calculation_set = None):
    def _pgradient(self, variable, parameter, value):
        if value is variable:
            try:
-                return np.reshape(self._grad_logps[parameter].get(), np.shape(variable.value))
+                return np.reshape(self._logp_partial_gradients[parameter].get(), np.shape(variable.value))
            except KeyError:
                raise NotImplementedError(repr(self) + " has no gradient function for parameter " + parameter)
        else:
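
Below is a minimal, illustrative sketch of how the renamed logp_partial_gradients argument might be supplied when building a Stochastic by hand, following the constructor signature shown in this patch. It assumes each gradient function accepts the same arguments as logp; the distribution, the parent mu, and the helper names (x_logp, dlogp_dvalue, dlogp_dmu) are hypothetical and not part of the change.

    # Illustrative sketch only; names other than logp_partial_gradients are hypothetical.
    import pymc

    def x_logp(value, mu, tau):
        # Normal log-density, up to an additive constant.
        return -0.5 * tau * (value - mu) ** 2

    def dlogp_dvalue(value, mu, tau):
        # Partial derivative of the log-density with respect to 'value'.
        return -tau * (value - mu)

    def dlogp_dmu(value, mu, tau):
        # Partial derivative of the log-density with respect to the parent 'mu'.
        return tau * (value - mu)

    mu = pymc.Normal('mu', mu = 0.0, tau = 1.0)

    x = pymc.Stochastic(logp = x_logp,
                        doc = 'Hand-built normal variable',
                        name = 'x',
                        parents = {'mu': mu, 'tau': 1.0},
                        value = 0.0,
                        logp_partial_gradients = {'value': dlogp_dvalue,
                                                  'mu': dlogp_dmu})

With entries like these, gen_lazy_function wraps each function in a LazyFunction keyed by parameter name, which is what logp_partial_gradient and _pgradient look up via self._logp_partial_gradients['value'] and self._logp_partial_gradients[parameter] in the hunks above.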