Commit 0f854b7
Author: Svetlana Karslioglu
Merge branch 'main' into main
2 parents 782bfe2 + 56a2faf · commit 0f854b7

21 files changed: +102 −71 lines

.github/PULL_REQUEST_TEMPLATE.md

Lines changed: 1 addition & 1 deletion
@@ -8,4 +8,4 @@ Fixes #ISSUE_NUMBER
 - [ ] The issue that is being fixed is referred in the description (see above "Fixes #ISSUE_NUMBER")
 - [ ] Only one issue is addressed in this pull request
 - [ ] Labels from the issue that this PR is fixing are added to this pull request
-- [ ] No unnessessary issues are included into this pull request.
+- [ ] No unnecessary issues are included into this pull request.

.github/scripts/docathon-label-sync.py

Lines changed: 3 additions & 0 deletions
@@ -14,6 +14,9 @@ def main():
     repo = g.get_repo(f'{repo_owner}/{repo_name}')
     pull_request = repo.get_pull(pull_request_number)
     pull_request_body = pull_request.body
+    # PR without description
+    if pull_request_body is None:
+        return
 
     # get issue number from the PR body
     if not re.search(r'#\d{1,5}', pull_request_body):

advanced_source/neural_style_tutorial.py

Lines changed: 3 additions & 0 deletions
@@ -423,6 +423,9 @@ def run_style_transfer(cnn, normalization_mean, normalization_std,
     # We want to optimize the input and not the model parameters so we
     # update all the requires_grad fields accordingly
     input_img.requires_grad_(True)
+    # We also put the model in evaluation mode, so that specific layers
+    # such as dropout or batch normalization layers behave correctly.
+    model.eval()
    model.requires_grad_(False)
 
     optimizer = get_input_optimizer(input_img)
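For context, evaluation mode is what switches off train-time stochasticity. A minimal, self-contained sketch (not part of the commit) of the difference it makes for a dropout layer:

import torch
import torch.nn as nn

torch.manual_seed(0)
drop = nn.Dropout(p=0.5)
x = torch.ones(1, 4)

drop.train()          # training mode: elements are zeroed at random
print(drop(x))        # survivors are scaled by 1/(1-p), i.e. 2.0

drop.eval()           # evaluation mode: dropout becomes the identity
print(drop(x))        # tensor([[1., 1., 1., 1.]])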

advanced_source/super_resolution_with_onnxruntime.py

Lines changed: 1 addition & 4 deletions
@@ -16,10 +16,7 @@
 and `ONNX Runtime <https://github.com/microsoft/onnxruntime>`__.
 You can get binary builds of ONNX and ONNX Runtime with
 ``pip install onnx onnxruntime``.
-Note that ONNX Runtime is compatible with Python versions 3.5 to 3.7.
-
-``NOTE``: This tutorial needs PyTorch master branch which can be installed by following
-the instructions `here <https://github.com/pytorch/pytorch#from-source>`__
+ONNX Runtime recommends using the latest stable runtime for PyTorch.
 
 """

beginner_source/README.txt

Lines changed: 1 addition & 1 deletion
@@ -23,4 +23,4 @@ Beginner Tutorials
 
 6. transformer_translation.py
    Language Translation with Transformers
-   https://pytorch.org/tutorials/beginner/transformer_tutorial.html
+   https://pytorch.org/tutorials/beginner/translation_transformer.html

beginner_source/deep_learning_60min_blitz.rst

Lines changed: 3 additions & 2 deletions
@@ -20,11 +20,12 @@ Goal of this tutorial:
 -  Understand PyTorch’s Tensor library and neural networks at a high level.
 -  Train a small neural network to classify images
 
-To run the tutorials below, make sure you have the `torch`_ and `torchvision`_
-packages installed.
+To run the tutorials below, make sure you have the `torch`_, `torchvision`_,
+and `matplotlib`_ packages installed.
 
 .. _torch: https://github.com/pytorch/pytorch
 .. _torchvision: https://github.com/pytorch/vision
+.. _matplotlib: https://github.com/matplotlib/matplotlib
 
 .. toctree::
    :hidden:
Lines changed: 10 additions & 0 deletions

@@ -0,0 +1,10 @@
+Finetuning Torchvision Models
+=============================
+
+This tutorial has been moved to https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
+
+It will redirect in 3 seconds.
+
+.. raw:: html
+
+   <meta http-equiv="Refresh" content="3; url='https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html'" />

beginner_source/former_torchies/parallelism_tutorial.py

Lines changed: 4 additions & 1 deletion
@@ -53,7 +53,10 @@ def forward(self, x):
 
 class MyDataParallel(nn.DataParallel):
     def __getattr__(self, name):
-        return getattr(self.module, name)
+        try:
+            return super().__getattr__(name)
+        except AttributeError:
+            return getattr(self.module, name)
 
 ########################################################################
 # **Primitives on which DataParallel is implemented upon:**
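The try/except ordering matters: ``nn.Module`` keeps parameters and submodules in internal dicts that are only reachable through ``nn.Module.__getattr__``, so delegating straight to ``self.module`` would re-enter ``__getattr__`` while resolving ``module`` itself and recurse forever. A minimal sketch (assuming a small CPU-only model) of the fixed pattern in use:

import torch.nn as nn

class MyDataParallel(nn.DataParallel):
    def __getattr__(self, name):
        try:
            # Resolve DataParallel's own attributes first, including
            # 'module', which lives in nn.Module's _modules dict.
            return super().__getattr__(name)
        except AttributeError:
            # Otherwise delegate to the wrapped network.
            return getattr(self.module, name)

net = MyDataParallel(nn.Linear(4, 2))
print(net.out_features)   # 2, delegated to the wrapped nn.Linear
print(type(net.module))   # nn.Linear, resolved by the parent class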

beginner_source/introyt/tensorboardyt_tutorial.py

Lines changed: 7 additions & 0 deletions
@@ -64,6 +64,13 @@
 # PyTorch TensorBoard support
 from torch.utils.tensorboard import SummaryWriter
 
+# In case you are using an environment that has TensorFlow installed,
+# such as Google Colab, uncomment the following code to avoid
+# a bug with saving embeddings to your TensorBoard directory
+
+# import tensorflow as tf
+# import tensorboard as tb
+# tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
 
 ######################################################################
 # Showing Images in TensorBoard
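For readers unfamiliar with the import being patched around, a minimal sketch (the log directory name is illustrative) of the ``SummaryWriter`` workflow this tutorial builds on:

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("runs/demo")   # event files land in this directory
for step in range(3):
    writer.add_scalar("loss", 1.0 / (step + 1), step)   # one point per step
writer.flush()                        # make sure pending events hit disk
writer.close()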

beginner_source/introyt/trainingyt.py

Lines changed: 12 additions & 8 deletions
@@ -290,15 +290,19 @@ def train_one_epoch(epoch_index, tb_writer):
     model.train(True)
     avg_loss = train_one_epoch(epoch_number, writer)
 
-    # We don't need gradients on to do reporting
-    model.train(False)
-
+
     running_vloss = 0.0
-    for i, vdata in enumerate(validation_loader):
-        vinputs, vlabels = vdata
-        voutputs = model(vinputs)
-        vloss = loss_fn(voutputs, vlabels)
-        running_vloss += vloss
+    # Set the model to evaluation mode, disabling dropout and using population
+    # statistics for batch normalization.
+    model.eval()
+
+    # Disable gradient computation and reduce memory consumption.
+    with torch.no_grad():
+        for i, vdata in enumerate(validation_loader):
+            vinputs, vlabels = vdata
+            voutputs = model(vinputs)
+            vloss = loss_fn(voutputs, vlabels)
+            running_vloss += vloss
 
     avg_vloss = running_vloss / (i + 1)
     print('LOSS train {} valid {}'.format(avg_loss, avg_vloss))
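The two changes are orthogonal: ``model.eval()`` switches layer behavior, while ``torch.no_grad()`` turns off graph recording. A minimal sketch (independent of the tutorial) of what the latter saves:

import torch

w = torch.randn(3, requires_grad=True)

y = (w * 2).sum()
print(y.requires_grad)    # True: autograd recorded the graph

with torch.no_grad():
    y = (w * 2).sum()
print(y.requires_grad)    # False: no graph, no activations kept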

beginner_source/nn_tutorial.py

Lines changed: 6 additions & 2 deletions
@@ -75,6 +75,11 @@
 import numpy as np
 
 pyplot.imshow(x_train[0].reshape((28, 28)), cmap="gray")
+# ``pyplot.show()`` only if not on Colab
+try:
+    import google.colab
+except ImportError:
+    pyplot.show()
 print(x_train.shape)
 
 ###############################################################################
@@ -790,8 +795,7 @@ def __len__(self):
         return len(self.dl)
 
     def __iter__(self):
-        batches = iter(self.dl)
-        for b in batches:
+        for b in self.dl:
             yield (self.func(*b))
 
 train_dl, valid_dl = get_data(train_ds, valid_ds, bs)
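The simplification works because a ``for`` loop calls ``iter()`` on its iterable implicitly, so the explicit ``iter()`` was redundant. A minimal, self-contained sketch (the batch contents are illustrative) of the wrapper in use:

class WrappedDataLoader:
    def __init__(self, dl, func):
        self.dl = dl
        self.func = func

    def __len__(self):
        return len(self.dl)

    def __iter__(self):
        for b in self.dl:           # 'for' calls iter(self.dl) itself
            yield self.func(*b)

# Apply a per-batch transform on the fly:
batches = [([1, 2], "a"), ([3, 4], "b")]
wrapped = WrappedDataLoader(batches, lambda x, y: (sum(x), y))
print(list(wrapped))                # [(3, 'a'), (7, 'b')]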

beginner_source/transformer_tutorial.py

Lines changed: 2 additions & 2 deletions
@@ -37,7 +37,7 @@
 # ``nn.TransformerEncoder`` consists of multiple layers of
 # `nn.TransformerEncoderLayer <https://pytorch.org/docs/stable/generated/torch.nn.TransformerEncoderLayer.html>`__.
 # Along with the input sequence, a square attention mask is required because the
-# self-attention layers in ``nn.TransformerEncoder`` are only allowed to attend
+# self-attention layers in ``nn.TransformerDecoder`` are only allowed to attend
 # the earlier positions in the sequence. For the language modeling task, any
 # tokens on the future positions should be masked. To produce a probability
 # distribution over output words, the output of the ``nn.TransformerEncoder``
@@ -149,7 +149,7 @@ def forward(self, x: Tensor) -> Tensor:
 # into ``batch_size`` columns. If the data does not divide evenly into
 # ``batch_size`` columns, then the data is trimmed to fit. For instance, with
 # the alphabet as the data (total length of 26) and ``batch_size=4``, we would
-# divide the alphabet into 4 sequences of length 6:
+# divide the alphabet into sequences of length 6, resulting in 4 of such sequences.
 #
 # .. math::
 #    \begin{bmatrix}
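As a worked version of the alphabet example, a minimal sketch (tensor values stand in for tokenized text) of the column-wise batching described above:

import torch

data = torch.arange(26)                       # stand-in for the 26-letter alphabet
batch_size = 4
seq_len = data.size(0) // batch_size          # 26 // 4 = 6
data = data[: seq_len * batch_size]           # trim 26 -> 24 to fit evenly
batches = data.view(batch_size, seq_len).t()  # 4 columns, each of length 6
print(batches.shape)                          # torch.Size([6, 4])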

intermediate_source/char_rnn_classification_tutorial.py

Lines changed: 16 additions & 15 deletions
@@ -2,13 +2,16 @@
 """
 NLP From Scratch: Classifying Names with a Character-Level RNN
 **************************************************************
-**Author**: `Sean Robertson <https://github.com/spro/practical-pytorch>`_
+**Author**: `Sean Robertson <https://github.com/spro>`_
 
-We will be building and training a basic character-level RNN to classify
-words. This tutorial, along with the following two, show how to do
-preprocess data for NLP modeling "from scratch", in particular not using
-many of the convenience functions of `torchtext`, so you can see how
-preprocessing for NLP modeling works at a low level.
+We will be building and training a basic character-level Recurrent Neural
+Network (RNN) to classify words. This tutorial, along with two other
+Natural Language Processing (NLP) "from scratch" tutorials
+:doc:`/intermediate/char_rnn_generation_tutorial` and
+:doc:`/intermediate/seq2seq_translation_tutorial`, show how to
+preprocess data to model NLP. In particular these tutorials do not
+use many of the convenience functions of `torchtext`, so you can see how
+preprocessing to model NLP works at a low level.
 
 A character-level RNN reads words as a series of characters -
 outputting a prediction and "hidden state" at each step, feeding its
@@ -32,13 +35,15 @@
 (-2.68) Dutch
 
 
-**Recommended Reading:**
+Recommended Preparation
+=======================
 
-I assume you have at least installed PyTorch, know Python, and
-understand Tensors:
+Before starting this tutorial it is recommended that you have installed PyTorch,
+and have a basic understanding of Python programming language and Tensors:
 
 - https://pytorch.org/ For installation instructions
 - :doc:`/beginner/deep_learning_60min_blitz` to get started with PyTorch in general
+  and learn the basics of Tensors
 - :doc:`/beginner/pytorch_with_examples` for a wide and deep overview
 - :doc:`/beginner/former_torchies_tutorial` if you are former Lua Torch user
@@ -181,10 +186,6 @@ def lineToTensor(line):
 # is just 2 linear layers which operate on an input and hidden state, with
 # a ``LogSoftmax`` layer after the output.
 #
-# .. figure:: https://i.imgur.com/Z2xbySO.png
-#    :alt:
-#
-#
 
 import torch.nn as nn
 
@@ -195,13 +196,13 @@ def __init__(self, input_size, hidden_size, output_size):
         self.hidden_size = hidden_size
 
         self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
-        self.i2o = nn.Linear(input_size + hidden_size, output_size)
+        self.h2o = nn.Linear(hidden_size, output_size)
         self.softmax = nn.LogSoftmax(dim=1)
 
     def forward(self, input, hidden):
         combined = torch.cat((input, hidden), 1)
         hidden = self.i2h(combined)
-        output = self.i2o(combined)
+        output = self.h2o(hidden)
         output = self.softmax(output)
         return output, hidden
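To see the corrected shapes, a minimal sketch assuming the ``RNN`` class above (the sizes are the ones the tutorial uses: 57 letters, 128 hidden units, 18 language categories):

import torch

n_letters, n_hidden, n_categories = 57, 128, 18
rnn = RNN(n_letters, n_hidden, n_categories)

input = torch.zeros(1, n_letters)      # a one-hot encoded letter
hidden = torch.zeros(1, n_hidden)
output, next_hidden = rnn(input, hidden)
print(output.shape)                    # torch.Size([1, 18]): h2o maps 128 -> 18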

intermediate_source/char_rnn_generation_tutorial.py

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
 """
 NLP From Scratch: Generating Names with a Character-Level RNN
 *************************************************************
-**Author**: `Sean Robertson <https://github.com/spro/practical-pytorch>`_
+**Author**: `Sean Robertson <https://github.com/spro>`_
 
 This is our second of three tutorials on "NLP From Scratch".
 In the `first tutorial </intermediate/char_rnn_classification_tutorial>`

intermediate_source/dynamic_quantization_bert_tutorial.rst

Lines changed: 1 addition & 1 deletion
@@ -68,7 +68,7 @@ built-in F1 score calculation helper function.
 .. code:: shell
 
    pip install sklearn
-   pip install transformers
+   pip install transformers==4.29.2
 
 
 Because we will be using the beta parts of the PyTorch, it is

intermediate_source/mario_rl_tutorial.py

Lines changed: 6 additions & 5 deletions
@@ -711,17 +711,18 @@ def record(self, episode, epsilon, step):
             f"{datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'):>20}\n"
         )
 
-        for metric in ["ep_rewards", "ep_lengths", "ep_avg_losses", "ep_avg_qs"]:
-            plt.plot(getattr(self, f"moving_avg_{metric}"))
-            plt.savefig(getattr(self, f"{metric}_plot"))
+        for metric in ["ep_lengths", "ep_avg_losses", "ep_avg_qs", "ep_rewards"]:
             plt.clf()
+            plt.plot(getattr(self, f"moving_avg_{metric}"), label=f"moving_avg_{metric}")
+            plt.legend()
+            plt.savefig(getattr(self, f"{metric}_plot"))
 
 
 ######################################################################
 # Let’s play!
 # """""""""""""""
 #
-# In this example we run the training loop for 10 episodes, but for Mario to truly learn the ways of
+# In this example we run the training loop for 40 episodes, but for Mario to truly learn the ways of
 # his world, we suggest running the loop for at least 40,000 episodes!
 #
 use_cuda = torch.cuda.is_available()
@@ -735,7 +736,7 @@ def record(self, episode, epsilon, step):
 
 logger = MetricLogger(save_dir)
 
-episodes = 10
+episodes = 40
 for e in range(episodes):
 
     state = env.reset()
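The plotting fix bundles two habits worth noting: clear the figure between metrics so lines don't accumulate across saves, and label each line so the legend has an entry. A minimal standalone sketch (synthetic series; the Agg backend makes it run headless):

import matplotlib
matplotlib.use("Agg")                 # save files without a display
import matplotlib.pyplot as plt

series = {"ep_lengths": [10, 12, 15], "ep_rewards": [1, 3, 2]}
for name, values in series.items():
    plt.clf()                         # start each metric on a blank figure
    plt.plot(values, label=f"moving_avg_{name}")
    plt.legend()
    plt.savefig(f"{name}_plot.png")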

intermediate_source/seq2seq_translation_tutorial.py

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
 """
 NLP From Scratch: Translation with a Sequence to Sequence Network and Attention
 *******************************************************************************
-**Author**: `Sean Robertson <https://github.com/spro/practical-pytorch>`_
+**Author**: `Sean Robertson <https://github.com/spro>`_
 
 This is the third and final tutorial on doing "NLP From Scratch", where we
 write our own classes and functions to preprocess the data to do our NLP

intermediate_source/tensorboard_profiler_tutorial.py

Lines changed: 7 additions & 7 deletions
@@ -18,7 +18,7 @@
 -----
 To install ``torch`` and ``torchvision`` use the following command:
 
-::
+.. code-block::
 
    pip install torch torchvision
 
@@ -160,23 +160,23 @@ def train(data):
 #
 # Install PyTorch Profiler TensorBoard Plugin.
 #
-# ::
+# .. code-block::
 #
 #    pip install torch_tb_profiler
 #
 
 ######################################################################
 # Launch the TensorBoard.
 #
-# ::
+# .. code-block::
 #
 #    tensorboard --logdir=./log
 #
 
 ######################################################################
 # Open the TensorBoard profile URL in Google Chrome browser or Microsoft Edge browser.
 #
-# ::
+# .. code-block::
 #
 #    http://localhost:6006/#pytorch_profiler
 #
@@ -287,7 +287,7 @@ def train(data):
 # In this example, we follow the "Performance Recommendation" and set ``num_workers`` as below,
 # pass a different name such as ``./log/resnet18_4workers`` to ``tensorboard_trace_handler``, and run it again.
 #
-# ::
+# .. code-block::
 #
 #    train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True, num_workers=4)
 #
@@ -316,7 +316,7 @@ def train(data):
 #
 # You can try it by using existing example on Azure
 #
-# ::
+# .. code-block::
 #
 #    pip install azure-storage-blob
 #    tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/memory_demo_1_10
@@ -366,7 +366,7 @@ def train(data):
 #
 # You can try it by using existing example on Azure:
 #
-# ::
+# .. code-block::
 #
 #    pip install azure-storage-blob
 #    tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/distributed_bert
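For readers who want to reproduce the trace-handler setup the "Performance Recommendation" step refers to, a minimal sketch (CPU-only; resnet18 and the directory name are stand-ins) using ``torch.profiler``:

import torch
import torchvision
from torch.profiler import (
    ProfilerActivity, profile, schedule, tensorboard_trace_handler,
)

model = torchvision.models.resnet18()
inputs = torch.randn(32, 3, 224, 224)

with profile(
    activities=[ProfilerActivity.CPU],
    schedule=schedule(wait=1, warmup=1, active=3),
    on_trace_ready=tensorboard_trace_handler("./log/resnet18_4workers"),
) as prof:
    for _ in range(5):                # wait(1) + warmup(1) + active(3)
        model(inputs)
        prof.step()                   # advance the profiler schedule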

intermediate_source/torchvision_tutorial.rst

Lines changed: 1 addition & 1 deletion
@@ -145,7 +145,7 @@ Let’s write a ``torch.utils.data.Dataset`` class for this dataset.
         num_objs = len(obj_ids)
         boxes = []
         for i in range(num_objs):
-            pos = np.where(masks[i])
+            pos = np.nonzero(masks[i])
             xmin = np.min(pos[1])
             xmax = np.max(pos[1])
             ymin = np.min(pos[0])
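A minimal sketch (synthetic mask, not the tutorial's data) of the bounding-box extraction above: ``np.nonzero`` returns the (row, col) indices of the mask's True pixels, exactly like one-argument ``np.where``, but is the form NumPy documents for this use:

import numpy as np

mask = np.zeros((6, 6), dtype=bool)
mask[2:4, 1:5] = True                         # a 2x4 blob of True pixels

pos = np.nonzero(mask)
xmin, xmax = np.min(pos[1]), np.max(pos[1])   # columns -> 1, 4
ymin, ymax = np.min(pos[0]), np.max(pos[0])   # rows    -> 2, 3
print([xmin, ymin, xmax, ymax])               # [1, 2, 4, 3]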

prototype_source/README.txt

Lines changed: 4 additions & 4 deletions
@@ -1,8 +1,8 @@
 Prototype Tutorials
 ------------------
 1. distributed_rpc_profiling.rst
-	 Profiling PyTorch RPC-Based Workloads
-	 https://github.com/pytorch/tutorials/blob/release/1.6/prototype_source/distributed_rpc_profiling.rst
+   Profiling PyTorch RPC-Based Workloads
+   https://github.com/pytorch/tutorials/blob/main/prototype_source/distributed_rpc_profiling.rst
 
 2. graph_mode_static_quantization_tutorial.py
    Graph Mode Post Training Static Quantization in PyTorch
@@ -21,8 +21,8 @@ Prototype Tutorials
    https://github.com/pytorch/tutorials/blob/main/prototype_source/torchscript_freezing.py
 
 6. vulkan_workflow.rst
-	 Vulkan Backend User Workflow
-	 https://pytorch.org/tutorials/intermediate/vulkan_workflow.html
+   Vulkan Backend User Workflow
+   https://pytorch.org/tutorials/intermediate/vulkan_workflow.html
 
 7. fx_graph_mode_ptq_static.rst
    FX Graph Mode Post Training Static Quantization
