@@ -437,6 +437,21 @@ def set_value(self,value):
 
     value = property(fget=get_value, fset=set_value, doc="Self's value computed from current values of parents.")
 
+    def apply_jacobian(self, parameter, variable, gradient):
+        try:
+            jacobian_func = self._jacobians[parameter]
+        except KeyError:
+            raise NotImplementedError(repr(self) + " has no jacobian function for parameter " + parameter)
+
+        jacobian = jacobian_func.get()
+
+
+        mapping = self._jacobian_formats.get(parameter, 'full')
+
+
+        return self._format_mapping[mapping](self, variable, jacobian, gradient)
+
+
     def logp_partial_gradient(self, variable, calculation_set = None):
         """
         gets the logp gradient of this deterministic with respect to variable
@@ -446,28 +461,15 @@ def logp_partial_gradient(self, variable, calculation_set = None):
 
         if not (variable.dtype in float_dtypes and self.dtype in float_dtypes):
             return zeros(shape(variable.value))
-
-        #gradient = 0
 
         # loop through all the parameters and add up all the gradients of log p with respect to the appropriate variable
        gradient = __builtin__.sum([child.logp_partial_gradient(self, calculation_set) for child in self.children])
 
         totalGradient = 0
         for parameter, value in self.parents.iteritems():
             if value is variable:
-
-                try:
-                    jacobian_func = self._jacobians[parameter]
-                except KeyError:
-                    raise NotImplementedError(repr(self) + " has no jacobian function for parameter " + parameter)
-
-                jacobian = jacobian_func.get()
-
-
-                mapping = self._jacobian_formats.get(parameter, 'full')
-
 
-                totalGradient += self._format_mapping[mapping](self, variable, jacobian, gradient)
+                totalGradient += self.apply_jacobian(parameter, variable, gradient)
 
         return np.reshape(totalGradient, shape(variable.value))
 
@@ -478,20 +480,20 @@ def full_jacobian(self, variable, jacobian, gradient):
     def transformation_operation_jacobian(self, variable, jacobian, gradient):
         return jacobian * gradient
 
-    _bog_history = {}
+    _BO_history = {}
     def broadcast_operation_jacobian(self, variable, jacobian, gradient):
 
         tgradient = jacobian * gradient
         try:
-            axes, lx = self._bog_history[id(variable)]
+            axes, lx = self._BO_history[id(variable)]
         except KeyError:
             sshape = array(shape(self.value))
             vshape = zeros(sshape.size)
             vshape[0:ndim(variable.value)] += array(shape(variable.value))
             axes = np.where(sshape != vshape)
             lx = size(axes)
 
-            self._bog_history[id(variable)] = (axes, lx)
+            self._BO_history[id(variable)] = (axes, lx)
 
         if lx > 0:
             return np.apply_over_axes(np.sum, tgradient, axes)
@@ -506,7 +508,6 @@ def accumulation_operation_jacobian(self, variable, jacobian, gradient):
         return gradient * jacobian
 
     def index_operation_jacobian(self, variable, jacobian, gradient):
-        #index = jacobian
         derivative = zeros(variable.shape)
         derivative[jacobian] = gradient
         return derivative
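
For readers skimming the refactor: apply_jacobian centralizes the three-step dispatch that logp_partial_gradient previously inlined per parameter — look up the parameter's jacobian function, fetch the jacobian value, then route it through the handler registered for that parameter's jacobian format. A minimal, self-contained sketch of the pattern follows; ToyDeterministic and its constructor are hypothetical stand-ins (only the attribute names _jacobians, _jacobian_formats, and _format_mapping come from the diff, and a plain callable stands in for the lazy jacobian_func.get()):

# Sketch only: a toy class exercising the apply_jacobian dispatch pattern.
import numpy as np

def full_jacobian(owner, variable, jacobian, gradient):
    # 'full' format: contract the jacobian matrix against the incoming gradient.
    return np.dot(jacobian.T, gradient)

class ToyDeterministic(object):
    # Maps a jacobian-format name to the handler that combines it with a gradient.
    _format_mapping = {'full': full_jacobian}

    def __init__(self, jacobians, jacobian_formats):
        self._jacobians = jacobians                  # parameter name -> callable returning its jacobian
        self._jacobian_formats = jacobian_formats    # parameter name -> format name

    def apply_jacobian(self, parameter, variable, gradient):
        try:
            jacobian_func = self._jacobians[parameter]
        except KeyError:
            raise NotImplementedError(repr(self) + " has no jacobian function for parameter " + parameter)
        jacobian = jacobian_func()                   # stands in for jacobian_func.get()
        mapping = self._jacobian_formats.get(parameter, 'full')
        return self._format_mapping[mapping](self, variable, jacobian, gradient)

d = ToyDeterministic(jacobians={'x': lambda: 2.0 * np.eye(3)},
                     jacobian_formats={'x': 'full'})
print(d.apply_jacobian('x', variable=None, gradient=np.ones(3)))   # [2. 2. 2.]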
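The axis bookkeeping in broadcast_operation_jacobian is the subtlest part of this change: when a parent was broadcast up to self's shape, the chain rule requires summing the elementwise jacobian * gradient product back over the broadcast axes, and the (axes, lx) pair is cached per variable so the shape comparison runs only once. A small worked example under assumed shapes (illustrative only, using plain NumPy in place of the class):

# Sketch only: the shape comparison behind broadcast_operation_jacobian.
import numpy as np

self_value = np.zeros((4, 3))        # the deterministic's (broadcast) value
variable_value = np.ones((4, 1))     # a parent that was broadcast along axis 1
tgradient = np.ones((4, 3))          # jacobian * gradient, elementwise

sshape = np.array(np.shape(self_value))
vshape = np.zeros(sshape.size)
vshape[0:np.ndim(variable_value)] += np.array(np.shape(variable_value))
axes = np.where(sshape != vshape)    # axes where the parent's shape was stretched
lx = np.size(axes)                   # number of broadcast axes found

# Sum the gradient back over the broadcast axes so it matches the parent's shape.
result = np.apply_over_axes(np.sum, tgradient, axes) if lx > 0 else tgradient
print(result.shape)                  # (4, 1)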