
Commit 2dee9da

farhanreynaldo authored and michaelosthege committed
replace aesara.tensor acronym with at
1 parent 97b54f0 commit 2dee9da


64 files changed: +1281, -1307 lines

RELEASE-NOTES.md

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
 
 ## PyMC3 vNext (4.0.0)
 ### Breaking Changes
-- ⚠ Theano-PyMC has been replaced with Aesara, so all external references to `theano`, `tt`, and `pymc3.theanof` need to be replaced with `aesara`, `aet`, and `pymc3.aesaraf` (see [4471](https://github.com/pymc-devs/pymc3/pull/4471)).
+- ⚠ Theano-PyMC has been replaced with Aesara, so all external references to `theano`, `tt`, and `pymc3.theanof` need to be replaced with `aesara`, `at`, and `pymc3.aesaraf` (see [4471](https://github.com/pymc-devs/pymc3/pull/4471)).
 - ArviZ `plots` and `stats` *wrappers* were removed. The functions are now just available by their original names (see [#4549](https://github.com/pymc-devs/pymc3/pull/4471) and `3.11.2` release notes).
 - ...
 

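For downstream code, this breaking change is a one-line alias swap. A minimal sketch of the migration (the toy expression below is illustrative, not from this commit):

    # old: import theano.tensor as tt   (PyMC3 3.x)
    # old: import aesara.tensor as aet  (early 4.0 development)
    import aesara.tensor as at  # the alias this commit standardizes on

    x = at.vector("x")
    y = at.exp(x).sum()  # the tensor API itself is unchanged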
benchmarks/benchmarks/benchmarks.py

Lines changed: 3 additions & 3 deletions
@@ -15,7 +15,7 @@
 import timeit
 
 import aesara
-import aesara.tensor as aet
+import aesara.tensor as at
 import arviz as az
 import numpy as np
 import pandas as pd
@@ -61,8 +61,8 @@ def mixture_model(random_seed=1234):
         mu = pm.Normal("mu", mu=0.0, sd=10.0, shape=w_true.shape)
         enforce_order = pm.Potential(
             "enforce_order",
-            aet.switch(mu[0] - mu[1] <= 0, 0.0, -np.inf)
-            + aet.switch(mu[1] - mu[2] <= 0, 0.0, -np.inf),
+            at.switch(mu[0] - mu[1] <= 0, 0.0, -np.inf)
+            + at.switch(mu[1] - mu[2] <= 0, 0.0, -np.inf),
         )
         tau = pm.Gamma("tau", alpha=1.0, beta=1.0, shape=w_true.shape)
         pm.NormalMixture("x_obs", w=w, mu=mu, tau=tau, observed=x)
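The `enforce_order` potential above adds 0.0 to the logp when the mixture means are ordered and -inf otherwise. A standalone sketch of that `at.switch` pattern, evaluated eagerly (the test values are made up):

    import aesara
    import aesara.tensor as at
    import numpy as np

    mu = at.vector("mu")
    # 0.0 where mu[0] <= mu[1] <= mu[2] holds, -inf where it is violated
    penalty = at.switch(mu[0] - mu[1] <= 0, 0.0, -np.inf) + at.switch(
        mu[1] - mu[2] <= 0, 0.0, -np.inf
    )
    f = aesara.function([mu], penalty)
    print(f(np.array([0.0, 1.0, 2.0])))  # 0.0: the ordering holds
    print(f(np.array([2.0, 1.0, 0.0])))  # -inf: both constraints violated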

docs/source/Advanced_usage_of_Aesara_in_PyMC3.rst

Lines changed: 7 additions & 7 deletions
@@ -14,7 +14,7 @@ Shared variables allow us to use values in Aesara functions that are
 not considered an input to the function, but can still be changed
 later. They are very similar to global variables in many ways::
 
-    a = aet.scalar('a')
+    a = at.scalar('a')
     # Create a new shared variable with initial value of 0.1
     b = aesara.shared(0.1)
     func = aesara.function([a], a * b)
@@ -184,13 +184,13 @@ We get
 Now, we use this to define an Aesara `Op` that also computes the gradient::
 
     import aesara
-    import aesara.tensor as aet
+    import aesara.tensor as at
     import aesara.tests.unittest_tools
     from aesara.graph.op import Op
 
     class MuFromTheta(Op):
-        itypes = [aet.dscalar]
-        otypes = [aet.dscalar]
+        itypes = [at.dscalar]
+        otypes = [at.dscalar]
 
         def perform(self, node, inputs, outputs):
             theta, = inputs
@@ -201,7 +201,7 @@ Now, we use this to define an Aesara `Op` that also computes the gradient::
             theta, = inputs
             mu = self(theta)
             thetamu = theta * mu
-            return [- g[0] * mu ** 2 / (1 + thetamu + aet.exp(-thetamu))]
+            return [- g[0] * mu ** 2 / (1 + thetamu + at.exp(-thetamu))]
 
 If you value your sanity, always check that the gradient is ok::
 
@@ -213,11 +213,11 @@ We can now define our model using this new `Op`::
 
     import pymc3 as pm
 
-    aet_mu_from_theta = MuFromTheta()
+    at_mu_from_theta = MuFromTheta()
 
     with pm.Model() as model:
         theta = pm.HalfNormal('theta', sigma=1)
-        mu = pm.Deterministic('mu', aet_mu_from_theta(theta))
+        mu = pm.Deterministic('mu', at_mu_from_theta(theta))
         pm.Normal('y', mu=mu, sigma=0.1, observed=[0.2, 0.21, 0.3])
 
     trace = pm.sample()
docs/source/PyMC3_and_Aesara.rst

Lines changed: 26 additions & 26 deletions
@@ -34,25 +34,25 @@ First, we need to define symbolic variables for our inputs (this
 is similar to eg SymPy's `Symbol`)::
 
     import aesara
-    import aesara.tensor as aet
+    import aesara.tensor as at
     # We don't specify the dtype of our input variables, so it
     # defaults to using float64 without any special config.
-    a = aet.scalar('a')
-    x = aet.vector('x')
-    # `aet.ivector` creates a symbolic vector of integers.
-    y = aet.ivector('y')
+    a = at.scalar('a')
+    x = at.vector('x')
+    # `at.ivector` creates a symbolic vector of integers.
+    y = at.ivector('y')
 
 Next, we use those variables to build up a symbolic representation
 of the output of our function. Note that no computation is actually
 being done at this point. We only record what operations we need to
 do to compute the output::
 
     inner = a * x**3 + y**2
-    out = aet.exp(inner).sum()
+    out = at.exp(inner).sum()
 
 .. note::
 
-    In this example we use `aet.exp` to create a symbolic representation
+    In this example we use `at.exp` to create a symbolic representation
     of the exponential of `inner`. Somewhat surprisingly, it
     would also have worked if we used `np.exp`. This is because numpy
     gives objects it operates on a chance to define the results of
@@ -77,8 +77,8 @@ We can call this function with actual arrays as many times as we want::
 
 For the most part the symbolic Aesara variables can be operated on
 like NumPy arrays. Most NumPy functions are available in `aesara.tensor`
-(which is typically imported as `aet`). A lot of linear algebra operations
-can be found in `aet.nlinalg` and `aet.slinalg` (the NumPy and SciPy
+(which is typically imported as `at`). A lot of linear algebra operations
+can be found in `at.nlinalg` and `at.slinalg` (the NumPy and SciPy
 operations respectively). Some support for sparse matrices is available
 in `aesara.sparse`. For a detailed overview of available operations,
 see `the aesara api docs <https://aesara.readthedocs.io/en/latest/library/tensor/index.html>`_.
@@ -88,9 +88,9 @@ NumPy arrays are operations involving conditional execution.
 
 Code like this won't work as expected::
 
-    a = aet.vector('a')
+    a = at.vector('a')
     if (a > 0).all():
-        b = aet.sqrt(a)
+        b = at.sqrt(a)
     else:
         b = -a
 
@@ -100,28 +100,28 @@ and according to the rules for this conversion, things that aren't empty
 containers or zero are converted to `True`. So the code is equivalent
 to this::
 
-    a = aet.vector('a')
-    b = aet.sqrt(a)
+    a = at.vector('a')
+    b = at.sqrt(a)
 
-To get the desired behaviour, we can use `aet.switch`::
+To get the desired behaviour, we can use `at.switch`::
 
-    a = aet.vector('a')
-    b = aet.switch((a > 0).all(), aet.sqrt(a), -a)
+    a = at.vector('a')
+    b = at.switch((a > 0).all(), at.sqrt(a), -a)
 
 Indexing also works similarly to NumPy::
 
-    a = aet.vector('a')
+    a = at.vector('a')
     # Access the 10th element. This will fail when a function built
     # from this expression is executed with an array that is too short.
     b = a[10]
 
     # Extract a subvector
     b = a[[1, 2, 10]]
 
-Changing elements of an array is possible using `aet.set_subtensor`::
+Changing elements of an array is possible using `at.set_subtensor`::
 
-    a = aet.vector('a')
-    b = aet.set_subtensor(a[:10], 1)
+    a = at.vector('a')
+    b = at.set_subtensor(a[:10], 1)
 
     # is roughly equivalent to this (although aesara avoids
     # the copy if `a` isn't used anymore)
@@ -167,7 +167,7 @@ this is happening::
     # in exactly this way!
     model = pm.Model()
 
-    mu = aet.scalar('mu')
+    mu = at.scalar('mu')
     model.add_free_variable(mu)
     model.add_logp_term(pm.Normal.dist(0, 1).logp(mu))
 
@@ -195,15 +195,15 @@ is roughly equivalent to this::
 
     # For illustration only, not real code!
     model = pm.Model()
-    mu = aet.scalar('mu')
+    mu = at.scalar('mu')
     model.add_free_variable(mu)
     model.add_logp_term(pm.Normal.dist(0, 1).logp(mu))
 
-    sd_log__ = aet.scalar('sd_log__')
+    sd_log__ = at.scalar('sd_log__')
     model.add_free_variable(sd_log__)
     model.add_logp_term(corrected_logp_half_normal(sd_log__))
 
-    sd = aet.exp(sd_log__)
+    sd = at.exp(sd_log__)
     model.add_deterministic_variable(sd)
 
     model.add_logp_term(pm.Normal.dist(mu, sd).logp(data))
@@ -214,8 +214,8 @@ Aesara operation on them::
 
     design_matrix = np.array([[...]])
     with pm.Model() as model:
-        # beta is a aet.dvector
+        # beta is an at.dvector
         beta = pm.Normal('beta', 0, 1, shape=len(design_matrix))
-        predict = aet.dot(design_matrix, beta)
+        predict = at.dot(design_matrix, beta)
         sd = pm.HalfCauchy('sd', beta=2.5)
         pm.Normal('y', mu=predict, sigma=sd, observed=data)
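Between the first two hunks the docs compile the graph; that elided step plus the new alias, as one runnable sketch mirroring the variables from the first hunk:

    import aesara
    import aesara.tensor as at
    import numpy as np

    a = at.scalar("a")
    x = at.vector("x")
    y = at.ivector("y")
    inner = a * x**3 + y**2
    out = at.exp(inner).sum()

    # Compiling turns the symbolic graph into a callable function
    func = aesara.function([a, x, y], out)
    print(func(1.0, np.array([0.5, 1.0]), np.array([1, 2], dtype="int32")))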

docs/source/developer_guide.rst

Lines changed: 6 additions & 6 deletions
@@ -417,11 +417,11 @@ usually created in order to optimise performance. But getting a
     class Exp(tr.ElemwiseTransform):
         name = "exp"
         def backward(self, x):
-            return aet.log(x)
+            return at.log(x)
         def forward(self, x):
-            return aet.exp(x)
+            return at.exp(x)
         def jacobian_det(self, x):
-            return -aet.log(x)
+            return -at.log(x)
 
     lognorm = Exp().apply(pm.Normal.dist(0., 1.))
     lognorm
@@ -562,7 +562,7 @@ sum them together to get the model logp:
         """Aesara scalar of log-probability of the model"""
         with self:
             factors = [var.logpt for var in self.basic_RVs] + self.potentials
-            logp = aet.sum([aet.sum(factor) for factor in factors])
+            logp = at.sum([at.sum(factor) for factor in factors])
             ...
             return logp
 
@@ -660,7 +660,7 @@ does not edit or rewrite the graph directly.
         self._vars_joined, self._logpt_joined = self._build_joined(
             self._logpt, grad_vars, self._ordering.vmap)
 
-        grad = aet.grad(self._logpt_joined, self._vars_joined)
+        grad = at.grad(self._logpt_joined, self._vars_joined)
         grad.name = '__grad'
 
         inputs = [self._vars_joined]
@@ -670,7 +670,7 @@ does not edit or rewrite the graph directly.
 
 
     def _build_joined(self, logpt, args, vmap):
-        args_joined = aet.vector('__args_joined')
+        args_joined = at.vector('__args_joined')
         args_joined.tag.test_value = np.zeros(self.size, dtype=self.dtype)
 
         joined_slices = {}
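The `at.grad` call in the third hunk is plain Aesara autodiff over the joined logp graph. A self-contained sketch of the same pattern with a toy log-density standing in for the model logp (names are illustrative, not PyMC3 internals):

    import aesara
    import aesara.tensor as at
    import numpy as np

    args_joined = at.vector("__args_joined")
    # Toy stand-in for the joined model logp: a standard-normal log-density
    logpt_joined = -0.5 * at.sum(args_joined ** 2)

    grad = at.grad(logpt_joined, args_joined)
    grad.name = "__grad"

    dlogp = aesara.function([args_joined], grad)
    print(dlogp(np.ones(3)))  # [-1. -1. -1.]: the gradient of -0.5 * x**2 is -x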
