Commit c703e69

Convert :: to code-block directive (#2737)

* Convert `::` to code-block directive

1 parent e0cfa6e

32 files changed: +161 −181 lines
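
For context, the change this commit applies across all 32 files: a bare ``::`` literal-block marker becomes an explicit ``.. code-block:: <language>`` directive, which tells Sphinx which lexer to use for syntax highlighting. A sketch on a hypothetical tutorial comment:

    # Before:
    #
    # ::
    #
    #    print(x.grad)
    #
    # After:
    #
    # .. code-block:: python
    #
    #    print(x.grad)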

advanced_source/neural_style_tutorial.py

Lines changed: 3 additions & 3 deletions
@@ -87,7 +87,7 @@
 # to 255 tensor images.
 #
 #
-# .. Note::
+# .. note::
 #    Here are links to download the images required to run the tutorial:
 #    `picasso.jpg <https://pytorch.org/tutorials/_static/img/neural-style/picasso.jpg>`__ and
 #    `dancing.jpg <https://pytorch.org/tutorials/_static/img/neural-style/dancing.jpg>`__.
@@ -183,7 +183,7 @@ def forward(self, input):
         return input
 
 ######################################################################
-# .. Note::
+# .. note::
 #    **Important detail**: although this module is named ``ContentLoss``, it
 #    is not a true PyTorch Loss function. If you want to define your content
 #    loss as a PyTorch Loss function, you have to create a PyTorch autograd function
@@ -372,7 +372,7 @@ def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
 input_img = content_img.clone()
 # if you want to use white noise by using the following code:
 #
-# ::
+# .. code-block:: python
 #
 #    input_img = torch.randn(content_img.data.size())

beginner_source/blitz/neural_networks_tutorial.py

Lines changed: 2 additions & 2 deletions
@@ -161,7 +161,7 @@ def forward(self, x):
 # ``.grad_fn`` attribute, you will see a graph of computations that looks
 # like this:
 #
-# ::
+# .. code-block:: sh
 #
 #    input -> conv2d -> relu -> maxpool2d -> conv2d -> relu -> maxpool2d
 #    -> flatten -> linear -> relu -> linear -> relu -> linear
@@ -253,7 +253,7 @@ def forward(self, x):
 
 
 ###############################################################
-# .. Note::
+# .. note::
 #
 #    Observe how gradient buffers had to be manually set to zero using
 #    ``optimizer.zero_grad()``. This is because gradients are accumulated
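
For context, a minimal runnable sketch of why ``optimizer.zero_grad()`` is needed; the tiny linear model and SGD settings are stand-ins, not the tutorial's network:

    import torch
    import torch.nn as nn
    import torch.optim as optim

    model = nn.Linear(4, 1)                      # stand-in for the tutorial's net
    optimizer = optim.SGD(model.parameters(), lr=0.01)
    criterion = nn.MSELoss()
    x, target = torch.randn(8, 4), torch.randn(8, 1)

    for _ in range(2):
        optimizer.zero_grad()   # without this, backward() adds onto stale .grad values
        loss = criterion(model(x), target)
        loss.backward()
        optimizer.step()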

beginner_source/data_loading_tutorial.py

Lines changed: 9 additions & 5 deletions
@@ -50,9 +50,9 @@
 # estimation <https://blog.dlib.net/2014/08/real-time-face-pose-estimation.html>`__
 # on a few images from imagenet tagged as 'face'.
 #
-# Dataset comes with a csv file with annotations which looks like this:
+# Dataset comes with a ``.csv`` file with annotations which looks like this:
 #
-# ::
+# .. code-block:: sh
 #
 #     image_name,part_0_x,part_0_y,part_1_x,part_1_y,part_2_x, ... ,part_67_x,part_67_y
 #     0805personali01.jpg,27,83,27,98, ... 84,134
@@ -196,7 +196,7 @@ def __getitem__(self, idx):
 # called. For this, we just need to implement ``__call__`` method and
 # if required, ``__init__`` method. We can then use a transform like this:
 #
-# ::
+# .. code-block:: python
 #
 #     tsfm = Transform(params)
 #     transformed_sample = tsfm(sample)
@@ -421,7 +421,9 @@ def show_landmarks_batch(sample_batched):
 # and dataloader. ``torchvision`` package provides some common datasets and
 # transforms. You might not even have to write custom classes. One of the
 # more generic datasets available in torchvision is ``ImageFolder``.
-# It assumes that images are organized in the following way: ::
+# It assumes that images are organized in the following way:
+#
+# .. code-block:: sh
 #
 #     root/ants/xxx.png
 #     root/ants/xxy.jpeg
@@ -435,7 +437,9 @@ def show_landmarks_batch(sample_batched):
 #
 # where 'ants', 'bees' etc. are class labels. Similarly generic transforms
 # which operate on ``PIL.Image`` like ``RandomHorizontalFlip``, ``Scale``,
-# are also available. You can use these to write a dataloader like this: ::
+# are also available. You can use these to write a dataloader like this:
+#
+# .. code-block:: python
 #
 #     import torch
 #     from torchvision import transforms, datasets
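
For context, a runnable sketch of the ``ImageFolder`` pattern this hunk documents; the ``data/train`` root is a placeholder and must contain one subdirectory per class:

    import torch
    from torchvision import datasets, transforms

    data_transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ])
    # Expects root/ants/xxx.png, root/bees/yyy.jpg, ... as shown above.
    dataset = datasets.ImageFolder(root="data/train", transform=data_transform)
    loader = torch.utils.data.DataLoader(dataset, batch_size=4,
                                         shuffle=True, num_workers=4)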

beginner_source/dcgan_faces_tutorial.py

Lines changed: 1 addition & 1 deletion
@@ -226,7 +226,7 @@
 # the ``celeba`` directory you just created. The resulting directory
 # structure should be:
 #
-# ::
+# .. code-block:: sh
 #
 #    /path/to/celeba
 #        -> img_align_celeba

beginner_source/hyperparameter_tuning_tutorial.py

Lines changed: 1 addition & 1 deletion
@@ -462,7 +462,7 @@ def main(num_samples=10, max_num_epochs=10, gpus_per_trial=2):
 ######################################################################
 # If you run the code, an example output could look like this:
 #
-# ::
+# .. code-block:: sh
 #
 #     Number of trials: 10/10 (10 TERMINATED)
 #     +-----+--------------+------+------+-------------+--------+---------+------------+

beginner_source/introyt/autogradyt_tutorial.py

Lines changed: 4 additions & 4 deletions
@@ -213,7 +213,7 @@
 #########################################################################
 # Recall the computation steps we took to get here:
 #
-# ::
+# .. code-block:: python
 #
 #    a = torch.linspace(0., 2. * math.pi, steps=25, requires_grad=True)
 #    b = torch.sin(a)
@@ -456,10 +456,10 @@ def add_tensors2(x, y):
 # .. note::
 #    The following code cell throws a runtime error. This is expected.
 #
-# ::
+# .. code-block:: python
 #
-#    a = torch.linspace(0., 2. * math.pi, steps=25, requires_grad=True)
-#    torch.sin_(a)
+#       a = torch.linspace(0., 2. * math.pi, steps=25, requires_grad=True)
+#       torch.sin_(a)
 #
 
 #########################################################################

beginner_source/introyt/captumyt.py

Lines changed: 6 additions & 2 deletions
@@ -109,11 +109,15 @@
 To install Captum in an Anaconda or pip virtual environment, use the
 appropriate command for your environment below:
 
-With ``conda``::
+With ``conda``:
+
+.. code-block:: sh
 
     conda install pytorch torchvision captum flask-compress matplotlib=3.3.4 -c pytorch
 
-With ``pip``::
+With ``pip``:
+
+.. code-block:: sh
 
     pip install torch torchvision captum matplotlib==3.3.4 Flask-Compress

beginner_source/introyt/introyt1_tutorial.py

Lines changed: 1 addition & 1 deletion
@@ -580,7 +580,7 @@ def forward(self, x):
 #
 # **When you run the cell above,** you should see something like this:
 #
-# ::
+# .. code-block:: sh
 #
 #    [1, 2000] loss: 2.235
 #    [1, 4000] loss: 1.940

beginner_source/introyt/tensorboardyt_tutorial.py

Lines changed: 6 additions & 2 deletions
@@ -24,12 +24,16 @@
 To run this tutorial, you’ll need to install PyTorch, TorchVision,
 Matplotlib, and TensorBoard.
 
-With ``conda``::
+With ``conda``:
+
+.. code-block:: sh
 
     conda install pytorch torchvision -c pytorch
     conda install matplotlib tensorboard
 
-With ``pip``::
+With ``pip``:
+
+.. code-block:: sh
 
     pip install torch torchvision matplotlib tensorboard

beginner_source/introyt/tensors_deeper_tutorial.py

Lines changed: 14 additions & 14 deletions
@@ -292,14 +292,14 @@
 # binary operation on tensors if dissimilar shape?
 #
 # .. note::
-#    The following cell throws a run-time error. This is intentional.
+#       The following cell throws a run-time error. This is intentional.
 #
-# ::
+# .. code-block:: sh
 #
-#    a = torch.rand(2, 3)
-#    b = torch.rand(3, 2)
+#       a = torch.rand(2, 3)
+#       b = torch.rand(3, 2)
 #
-#    print(a * b)
+#       print(a * b)
 #
 

@@ -390,17 +390,17 @@
 # Here are some examples of attempts at broadcasting that will fail:
 #
 # .. note::
-#    The following cell throws a run-time error. This is intentional.
+#       The following cell throws a run-time error. This is intentional.
 #
-# ::
+# .. code-block:: python
 #
-#    a = torch.ones(4, 3, 2)
+#       a = torch.ones(4, 3, 2)
 #
-#    b = a * torch.rand(4, 3) # dimensions must match last-to-first
+#       b = a * torch.rand(4, 3) # dimensions must match last-to-first
 #
-#    c = a * torch.rand( 2, 3) # both 3rd & 2nd dims different
+#       c = a * torch.rand( 2, 3) # both 3rd & 2nd dims different
 #
-#    d = a * torch.rand((0, )) # can't broadcast with an empty tensor
+#       d = a * torch.rand((0, )) # can't broadcast with an empty tensor
 #
 

@@ -729,7 +729,7 @@
 # following code will throw a runtime error, regardless of whether you
 # have a GPU device available:
 #
-# ::
+# .. code-block:: python
 #
 #     x = torch.rand(2, 2)
 #     y = torch.rand(2, 2, device='gpu')
@@ -820,9 +820,9 @@
 # Another place you might use ``unsqueeze()`` is to ease broadcasting.
 # Recall the example above where we had the following code:
 #
-# ::
+# .. code-block:: python
 #
-#     a = torch.ones(4, 3, 2)
+#       a = torch.ones(4, 3, 2)
 #
 #     c = a * torch.rand( 3, 1) # 3rd dim = 1, 2nd dim identical to a
 #     print(c)
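
For context, a runnable sketch of the broadcasting rules these hunks quote; shapes are compared from the last dimension backward, and a size-1 dimension matches anything:

    import torch

    a = torch.ones(4, 3, 2)

    print((a * torch.rand(3, 2)).shape)   # works: trailing dims (3, 2) match -> torch.Size([4, 3, 2])
    print((a * torch.rand(3, 1)).shape)   # works: the size-1 dim broadcasts -> torch.Size([4, 3, 2])

    try:
        a * torch.rand(4, 3)              # fails: (4, 3) cannot align with trailing (3, 2)
    except RuntimeError as e:
        print("broadcast failed:", e)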

beginner_source/nn_tutorial.py

Lines changed: 8 additions & 8 deletions
@@ -328,7 +328,7 @@ def forward(self, xb):
 # Previously for our training loop we had to update the values for each parameter
 # by name, and manually zero out the grads for each parameter separately, like this:
 #
-# ::
+# .. code-block:: python
 #
 #    with torch.no_grad():
 #        weights -= weights.grad * lr
@@ -342,7 +342,7 @@ def forward(self, xb):
 # and less prone to the error of forgetting some of our parameters, particularly
 # if we had a more complicated model:
 #
-# ::
+# .. code-block:: python
 #
 #    with torch.no_grad():
 #        for p in model.parameters(): p -= p.grad * lr
@@ -418,15 +418,15 @@ def forward(self, xb):
 #
 # This will let us replace our previous manually coded optimization step:
 #
-# ::
+# .. code-block:: python
 #
 #    with torch.no_grad():
 #        for p in model.parameters(): p -= p.grad * lr
 #    model.zero_grad()
 #
 # and instead use just:
 #
-# ::
+# .. code-block:: python
 #
 #    opt.step()
 #    opt.zero_grad()
@@ -490,15 +490,15 @@ def get_model():
 ###############################################################################
 # Previously, we had to iterate through minibatches of ``x`` and ``y`` values separately:
 #
-# ::
+# .. code-block:: python
 #
 #    xb = x_train[start_i:end_i]
 #    yb = y_train[start_i:end_i]
 #
 #
 # Now, we can do these two steps together:
 #
-# ::
+# .. code-block:: python
 #
 #    xb,yb = train_ds[i*bs : i*bs+bs]
 #
@@ -534,15 +534,15 @@ def get_model():
 ###############################################################################
 # Previously, our loop iterated over batches ``(xb, yb)`` like this:
 #
-# ::
+# .. code-block:: python
 #
 #    for i in range((n-1)//bs + 1):
 #        xb,yb = train_ds[i*bs : i*bs+bs]
 #        pred = model(xb)
 #
 # Now, our loop is much cleaner, as ``(xb, yb)`` are loaded automatically from the data loader:
 #
-# ::
+# .. code-block:: python
 #
 #    for xb,yb in train_dl:
 #        pred = model(xb)
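
For context, a minimal sketch combining the ``Dataset``/``DataLoader`` and optimizer patterns these hunks quote; the random data and linear model are stand-ins for the tutorial's MNIST setup:

    import torch
    from torch import nn, optim
    from torch.utils.data import TensorDataset, DataLoader

    x_train = torch.randn(256, 784)               # stand-in data
    y_train = torch.randint(0, 10, (256,))
    train_ds = TensorDataset(x_train, y_train)
    train_dl = DataLoader(train_ds, batch_size=64, shuffle=True)

    model = nn.Linear(784, 10)
    opt = optim.SGD(model.parameters(), lr=0.1)

    for xb, yb in train_dl:                       # batches come from the DataLoader
        pred = model(xb)
        loss = nn.functional.cross_entropy(pred, yb)
        loss.backward()
        opt.step()        # replaces the manual per-parameter update
        opt.zero_grad()   # replaces the manual grad zeroing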

beginner_source/profiler.py

Lines changed: 2 additions & 2 deletions
@@ -82,7 +82,7 @@ def forward(self, input, mask):
 # ``profiler.profile`` context manager. The ``with_stack=True`` parameter appends the
 # file and line number of the operation in the trace.
 #
-# .. WARNING::
+# .. warning::
 #    ``with_stack=True`` incurs an additional overhead, and is better suited for investigating code.
 #    Remember to remove it if you are benchmarking performance.
 #
@@ -115,7 +115,7 @@ def forward(self, input, mask):
 # `docs <https://pytorch.org/docs/stable/autograd.html#profiler>`__ for
 # valid sorting keys).
 #
-# .. Note::
+# .. note::
 #    When running profiler in a notebook, you might see entries like ``<ipython-input-18-193a910735e8>(13): forward``
 #    instead of filenames in the stacktrace. These correspond to ``<notebook-cell>(line number): calling-function``.
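
For context, a minimal sketch of the ``with_stack=True`` usage these hunks refer to; the linear model is a placeholder for the tutorial's custom module:

    import torch
    import torch.autograd.profiler as profiler

    model = torch.nn.Linear(128, 128)     # placeholder module
    x = torch.randn(32, 128)

    with profiler.profile(with_stack=True, profile_memory=True) as prof:
        model(x)

    # group_by_stack_n attaches file/line provenance to each aggregated op
    print(prof.key_averages(group_by_stack_n=5).table(sort_by="self_cpu_time_total", row_limit=5))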

beginner_source/saving_loading_models.py

Lines changed: 3 additions & 3 deletions
@@ -115,7 +115,7 @@
 #
 # **Output:**
 #
-# ::
+# .. code-block:: sh
 #
 #    Model's state_dict:
 #    conv1.weight     torch.Size([6, 3, 5, 5])
@@ -175,15 +175,15 @@
 # normalization layers to evaluation mode before running inference.
 # Failing to do this will yield inconsistent inference results.
 #
-# .. Note ::
+# .. note::
 #
 #    Notice that the ``load_state_dict()`` function takes a dictionary
 #    object, NOT a path to a saved object. This means that you must
 #    deserialize the saved *state_dict* before you pass it to the
 #    ``load_state_dict()`` function. For example, you CANNOT load using
 #    ``model.load_state_dict(PATH)``.
 #
-# .. Note ::
+# .. note::
 #
 #    If you only plan to keep the best performing model (according to the
 #    acquired validation loss), don't forget that ``best_model_state = model.state_dict()``
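
For context, a minimal sketch of the ``state_dict`` round trip the notes describe; the model and ``PATH`` are placeholders:

    import torch

    model = torch.nn.Linear(4, 2)          # placeholder model
    PATH = "model_weights.pt"              # placeholder path

    torch.save(model.state_dict(), PATH)   # save parameters only, not the module

    # Deserialize first, then pass the dict; model.load_state_dict(PATH) would fail.
    model.load_state_dict(torch.load(PATH))
    model.eval()                           # dropout/batch-norm to evaluation mode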

beginner_source/text_sentiment_ngrams_tutorial.py

Lines changed: 4 additions & 4 deletions
@@ -37,7 +37,7 @@
 train_iter = iter(AG_NEWS(split="train"))
 
 ######################################################################
-# ::
+# .. code-block:: sh
 #
 #    next(train_iter)
 #    >>> (3, "Fears for T N pension after talks Unions representing workers at Turner
@@ -88,7 +88,7 @@ def yield_tokens(data_iter):
 ######################################################################
 # The vocabulary block converts a list of tokens into integers.
 #
-# ::
+# .. code-block:: sh
 #
 #    vocab(['here', 'is', 'an', 'example'])
 #    >>> [475, 21, 30, 5297]
@@ -102,7 +102,7 @@ def yield_tokens(data_iter):
 ######################################################################
 # The text pipeline converts a text string into a list of integers based on the lookup table defined in the vocabulary. The label pipeline converts the label into integers. For example,
 #
-# ::
+# .. code-block:: sh
 #
 #    text_pipeline('here is the an example')
 #    >>> [475, 21, 2, 30, 5297]
@@ -188,7 +188,7 @@ def forward(self, text, offsets):
 #
 # The ``AG_NEWS`` dataset has four labels and therefore the number of classes is four.
 #
-# ::
+# .. code-block:: sh
 #
 #    1 : World
 #    2 : Sports
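
For context, a runnable sketch of the pipelines these hunks quote; the vocabulary is built from a toy corpus here, so the integer ids will differ from the AG_NEWS output shown above:

    from torchtext.data.utils import get_tokenizer
    from torchtext.vocab import build_vocab_from_iterator

    tokenizer = get_tokenizer("basic_english")
    corpus = ["here is an example", "fears for pension after talks"]
    vocab = build_vocab_from_iterator((tokenizer(t) for t in corpus), specials=["<unk>"])
    vocab.set_default_index(vocab["<unk>"])

    text_pipeline = lambda x: vocab(tokenizer(x))
    label_pipeline = lambda x: int(x) - 1   # AG_NEWS labels 1..4 -> classes 0..3

    print(text_pipeline("here is the an example"))  # 'the' is absent from the toy corpus, so it maps to <unk>
    print(label_pipeline("3"))                      # 2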
