Skip to content

Commit 619b74e

Browse files
Change import pytensor.tensor as at to as pt everywhere in the docs
1 parent 2cef9c0 commit 619b74e

23 files changed

+344
-344
lines changed

doc/glossary.rst

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ Glossary
66
.. testsetup::
77

88
import pytensor
9-
import pytensor.tensor as at
9+
import pytensor.tensor as pt
1010

1111
.. glossary::
1212

@@ -31,7 +31,7 @@ Glossary
3131
A variable with an immutable value.
3232
For example, when you type
3333

34-
>>> x = at.ivector()
34+
>>> x = pt.ivector()
3535
>>> y = x + 3
3636

3737
Then a `constant` is created to represent the ``3`` in the graph.
@@ -151,7 +151,7 @@ Glossary
151151
The the main data structure you work with when using PyTensor.
152152
For example,
153153

154-
>>> x = at.ivector()
154+
>>> x = pt.ivector()
155155
>>> y = -x**2
156156

157157
``x`` and ``y`` are both :class:`Variable`\s, i.e. instances of the :class:`Variable` class.

doc/introduction.rst

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -69,8 +69,8 @@ its features, but it illustrates concretely what PyTensor is.
6969
from pytensor import tensor as pt
7070
7171
# declare two symbolic floating-point scalars
72-
a = at.dscalar()
73-
b = at.dscalar()
72+
a = pt.dscalar()
73+
b = pt.dscalar()
7474
7575
# create a simple expression
7676
c = a + b

doc/library/compile/io.rst

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@ defining an arity-2 function ``inc``.
8383
>>> import pytensor.tensor as pt
8484
>>> from pytensor import function
8585
>>> from pytensor.compile.io import In
86-
>>> u, x, s = at.scalars('u', 'x', 's')
86+
>>> u, x, s = pt.scalars('u', 'x', 's')
8787
>>> inc = function([u, In(x, value=3), In(s, update=(s+x*u), value=10.0)], [])
8888

8989
Since we provided a ``value`` for ``s`` and ``x``, we can call it with just a value for ``u`` like this:
@@ -184,7 +184,7 @@ To show some examples of these access methods...
184184

185185

186186
>>> from pytensor import tensor as pt, function
187-
>>> a, b, c = at.scalars('xys') # set the internal names of graph nodes
187+
>>> a, b, c = pt.scalars('xys') # set the internal names of graph nodes
188188
>>> # Note that the name of c is 's', not 'c'!
189189
>>> fn = function([a, b, ((c, c+a+b), 10.0)], [])
190190

@@ -238,10 +238,10 @@ Example:
238238
>>> import pytensor
239239
>>> from pytensor import tensor as pt
240240
>>> from pytensor.compile.io import In
241-
>>> x = at.scalar()
242-
>>> y = at.scalar('y')
243-
>>> z = at.scalar('z')
244-
>>> w = at.scalar('w')
241+
>>> x = pt.scalar()
242+
>>> y = pt.scalar('y')
243+
>>> z = pt.scalar('z')
244+
>>> w = pt.scalar('w')
245245

246246
>>> fn = pytensor.function(inputs=[x, y, In(z, value=42), ((w, w+x), 0)],
247247
... outputs=x + y + z)
@@ -308,7 +308,7 @@ If a list of ``Variable`` or ``Out`` instances is given as argument, then the co
308308

309309
>>> import numpy
310310
>>> from pytensor.compile.io import Out
311-
>>> x, y, s = at.matrices('xys')
311+
>>> x, y, s = pt.matrices('xys')
312312

313313
>>> # print a list of 2 ndarrays
314314
>>> fn1 = pytensor.function([x], [x+x, Out((x+x).T, borrow=True)])

doc/library/d3viz/index.ipynb

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@
7070
},
7171
"outputs": [],
7272
"source": [
73-
"import pytensor as th\n",
73+
"import pytensor\n",
7474
"import pytensor.tensor as pt\n",
7575
"import numpy as np"
7676
]
@@ -96,16 +96,16 @@
9696
"nhiddens = 50\n",
9797
"\n",
9898
"rng = np.random.RandomState(0)\n",
99-
"x = at.dmatrix('x')\n",
100-
"wh = th.shared(rng.normal(0, 1, (nfeatures, nhiddens)), borrow=True)\n",
101-
"bh = th.shared(np.zeros(nhiddens), borrow=True)\n",
102-
"h = at.sigmoid(at.dot(x, wh) + bh)\n",
99+
"x = pt.dmatrix('x')\n",
100+
"wh = pytensor.shared(rng.normal(0, 1, (nfeatures, nhiddens)), borrow=True)\n",
101+
"bh = pytensor.shared(np.zeros(nhiddens), borrow=True)\n",
102+
"h = pt.sigmoid(pt.dot(x, wh) + bh)\n",
103103
"\n",
104-
"wy = th.shared(rng.normal(0, 1, (nhiddens, noutputs)))\n",
105-
"by = th.shared(np.zeros(noutputs), borrow=True)\n",
106-
"y = at.special.softmax(at.dot(h, wy) + by)\n",
104+
"wy = pytensor.shared(rng.normal(0, 1, (nhiddens, noutputs)))\n",
105+
"by = pytensor.shared(np.zeros(noutputs), borrow=True)\n",
106+
"y = pt.special.softmax(pt.dot(h, wy) + by)\n",
107107
"\n",
108-
"predict = th.function([x], y)"
108+
"predict = pytensor.function([x], y)"
109109
]
110110
},
111111
{
@@ -276,7 +276,7 @@
276276
},
277277
"outputs": [],
278278
"source": [
279-
"predict_profiled = th.function([x], y, profile=True)\n",
279+
"predict_profiled = pytensor.function([x], y, profile=True)\n",
280280
"\n",
281281
"x_val = rng.normal(0, 1, (ninputs, nfeatures))\n",
282282
"y_val = predict_profiled(x_val)"
@@ -388,12 +388,12 @@
388388
},
389389
"outputs": [],
390390
"source": [
391-
"x, y, z = at.scalars('xyz')\n",
392-
"e = at.sigmoid((x + y + z)**2)\n",
393-
"op = th.compile.builders.OpFromGraph([x, y, z], [e])\n",
391+
"x, y, z = pt.scalars('xyz')\n",
392+
"e = pt.sigmoid((x + y + z)**2)\n",
393+
"op = pytensor.compile.builders.OpFromGraph([x, y, z], [e])\n",
394394
"\n",
395395
"e2 = op(x, y, z) + op(z, y, x)\n",
396-
"f = th.function([x, y, z], e2)"
396+
"f = pytensor.function([x, y, z], e2)"
397397
]
398398
},
399399
{
@@ -433,13 +433,13 @@
433433
},
434434
"outputs": [],
435435
"source": [
436-
"x, y, z = at.scalars('xyz')\n",
436+
"x, y, z = pt.scalars('xyz')\n",
437437
"e = x * y\n",
438-
"op = th.compile.builders.OpFromGraph([x, y], [e])\n",
438+
"op = pytensor.compile.builders.OpFromGraph([x, y], [e])\n",
439439
"e2 = op(x, y) + z\n",
440-
"op2 = th.compile.builders.OpFromGraph([x, y, z], [e2])\n",
440+
"op2 = pytensor.compile.builders.OpFromGraph([x, y, z], [e2])\n",
441441
"e3 = op2(x, y, z) + z\n",
442-
"f = th.function([x, y, z], [e3])"
442+
"f = pytensor.function([x, y, z], [e3])"
443443
]
444444
},
445445
{

doc/library/d3viz/index.rst

Lines changed: 19 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -49,8 +49,8 @@ hidden layer and a softmax output layer.
4949

5050
.. code:: python
5151
52-
import pytensor as th
53-
import pytensor.tensor as at
52+
import pytensor
53+
import pytensor.tensor as pt
5454
import numpy as np
5555
5656
ninputs = 1000
@@ -59,16 +59,16 @@ hidden layer and a softmax output layer.
5959
nhiddens = 50
6060
6161
rng = np.random.RandomState(0)
62-
x = at.dmatrix('x')
63-
wh = th.shared(rng.normal(0, 1, (nfeatures, nhiddens)), borrow=True)
64-
bh = th.shared(np.zeros(nhiddens), borrow=True)
65-
h = at.sigmoid(at.dot(x, wh) + bh)
62+
x = pt.dmatrix('x')
63+
wh = pytensor.shared(rng.normal(0, 1, (nfeatures, nhiddens)), borrow=True)
64+
bh = pytensor.shared(np.zeros(nhiddens), borrow=True)
65+
h = pt.sigmoid(pt.dot(x, wh) + bh)
6666
67-
wy = th.shared(rng.normal(0, 1, (nhiddens, noutputs)))
68-
by = th.shared(np.zeros(noutputs), borrow=True)
69-
y = at.special.softmax(at.dot(h, wy) + by)
67+
wy = pytensor.shared(rng.normal(0, 1, (nhiddens, noutputs)))
68+
by = pytensor.shared(np.zeros(noutputs), borrow=True)
69+
y = pt.special.softmax(pt.dot(h, wy) + by)
7070
71-
predict = th.function([x], y)
71+
predict = pytensor.function([x], y)
7272
7373
The function ``predict`` outputs the probability of 10 classes. You can
7474
visualize it with :py:func:`pytensor.printing.pydotprint` as follows:
@@ -151,7 +151,7 @@ random data:
151151

152152
.. code:: python
153153
154-
predict_profiled = th.function([x], y, profile=True)
154+
predict_profiled = pytensor.function([x], y, profile=True)
155155
156156
x_val = rng.normal(0, 1, (ninputs, nfeatures))
157157
y_val = predict_profiled(x_val)
@@ -209,12 +209,12 @@ node defines a nested graph, which will be visualized accordingly by ``d3viz``.
209209

210210
.. code:: python
211211
212-
x, y, z = at.scalars('xyz')
213-
e = at.sigmoid((x + y + z)**2)
214-
op = th.compile.builders.OpFromGraph([x, y, z], [e])
212+
x, y, z = pt.scalars('xyz')
213+
e = pt.sigmoid((x + y + z)**2)
214+
op = pytensor.compile.builders.OpFromGraph([x, y, z], [e])
215215
216216
e2 = op(x, y, z) + op(z, y, x)
217-
f = th.function([x, y, z], e2)
217+
f = pytensor.function([x, y, z], e2)
218218
219219
.. code:: python
220220
@@ -238,13 +238,13 @@ the following example.
238238

239239
.. code:: python
240240
241-
x, y, z = at.scalars('xyz')
241+
x, y, z = pt.scalars('xyz')
242242
e = x * y
243-
op = th.compile.builders.OpFromGraph([x, y], [e])
243+
op = pytensor.compile.builders.OpFromGraph([x, y], [e])
244244
e2 = op(x, y) + z
245-
op2 = th.compile.builders.OpFromGraph([x, y, z], [e2])
245+
op2 = pytensor.compile.builders.OpFromGraph([x, y, z], [e2])
246246
e3 = op2(x, y, z) + z
247-
f = th.function([x, y, z], [e3])
247+
f = pytensor.function([x, y, z], [e3])
248248
249249
.. code:: python
250250

doc/library/printing.rst

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,8 @@ Intermediate values in a computation cannot be printed in
2323
the normal python way with the print statement, because PyTensor has no *statements*.
2424
Instead there is the :class:`Print` Op.
2525

26-
>>> from pytensor import tensor as at, function, printing
27-
>>> x = at.dvector()
26+
>>> from pytensor import tensor as pt, function, printing
27+
>>> x = pt.dvector()
2828
>>> hello_world_op = printing.Print('hello world')
2929
>>> printed_x = hello_world_op(x)
3030
>>> f = function([x], printed_x)
@@ -53,7 +53,7 @@ PyTensor also provides :func:`pytensor.printing.pydotprint` that creates a png i
5353

5454
>>> from pytensor import pp, grad
5555
>>> from pytensor import tensor as pt
56-
>>> x = at.dscalar('x')
56+
>>> x = pt.dscalar('x')
5757
>>> y = x ** 2
5858
>>> gy = grad(y, x)
5959
>>> pp(gy) # print out the gradient prior to rewriting
@@ -62,7 +62,7 @@ PyTensor also provides :func:`pytensor.printing.pydotprint` that creates a png i
6262
>>> pp(f.maker.fgraph.outputs[0])
6363
'(TensorConstant{2.0} * x)'
6464

65-
The parameter in at.dscalar('x') in the first line is the name of this variable
65+
The parameter in pt.dscalar('x') in the first line is the name of this variable
6666
in the graph. This name is used when printing the graph to make it more readable.
6767
If no name is provided the variable x is printed as its type as returned by
6868
``x.type()``. In this example - ``<TensorType(float64, ())>``.

0 commit comments

Comments
 (0)