
Commit 6c8a3bf

Merge branch 'main' into fgsm-fix
2 parents bfe4e02 + 1fe4025

22 files changed: +68 -91 lines

.github/workflows/build-tutorials.yml

Lines changed: 10 additions & 0 deletions

@@ -177,6 +177,16 @@ jobs:
 
           docker exec -t "${container_name}" sh -c ".jenkins/build.sh"
 
+      - name: Upload docs preview
+        uses: seemethere/upload-artifact-s3@v5
+        if: ${{ github.event_name == 'pull_request' }}
+        with:
+          retention-days: 14
+          s3-bucket: doc-previews
+          if-no-files-found: error
+          path: docs
+          s3-prefix: pytorch/tutorials/${{ github.event.pull_request.number }}
+
       - name: Teardown Linux
         uses: pytorch/test-infra/.github/actions/teardown-linux@main
        if: always()

advanced_source/cpp_frontend.rst

Lines changed: 0 additions & 3 deletions

@@ -1216,9 +1216,6 @@ tensors and display them with matplotlib:
 
 .. code-block:: python
 
-    from __future__ import print_function
-    from __future__ import unicode_literals
-
     import argparse
 
     import matplotlib.pyplot as plt

advanced_source/neural_style_tutorial.py

Lines changed: 0 additions & 2 deletions

@@ -47,8 +47,6 @@
 # - ``torchvision.models`` (train or load pretrained models)
 # - ``copy`` (to deep copy the models; system package)
 
-from __future__ import print_function
-
 import torch
 import torch.nn as nn
 import torch.nn.functional as F

advanced_source/rpc_ddp_tutorial.rst

Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 Combining Distributed DataParallel with Distributed RPC Framework
 =================================================================
-**Authors**: `Pritam Damania <https://github.com/pritamdamania87>`_ and `Yi Wang <https://github.com/SciPioneer>`_
+**Authors**: `Pritam Damania <https://github.com/pritamdamania87>`_ and `Yi Wang <https://github.com/wayi1>`_
 
 .. note::
    |edit| View and edit this tutorial in `github <https://github.com/pytorch/tutorials/blob/main/advanced_source/rpc_ddp_tutorial.rst>`__.

beginner_source/Intro_to_TorchScript_tutorial.py

Lines changed: 2 additions & 1 deletion

@@ -33,6 +33,7 @@
 
 import torch  # This is all you need to use both PyTorch and TorchScript!
 print(torch.__version__)
+torch.manual_seed(191009)  # set the seed for reproducibility
 
 
 ######################################################################
@@ -308,7 +309,7 @@ def forward(self, x, h):
 
 # New inputs
 x, h = torch.rand(3, 4), torch.rand(3, 4)
-traced_cell(x, h)
+print(scripted_cell(x, h))
 
 
 ######################################################################
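
For context, a minimal sketch of the difference between tracing and scripting a module, which is what the switch to printing ``scripted_cell(x, h)`` above exercises (the ``MyCell`` module here is a simplified stand-in, not the tutorial's exact class):

    import torch

    class MyCell(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(4, 4)

        def forward(self, x, h):
            new_h = torch.tanh(self.linear(x) + h)
            return new_h, new_h

    cell = MyCell()
    x, h = torch.rand(3, 4), torch.rand(3, 4)

    # Tracing records the operations executed for one example input.
    traced_cell = torch.jit.trace(cell, (x, h))
    # Scripting compiles the Python source itself, including control flow.
    scripted_cell = torch.jit.script(cell)

    print(traced_cell(x, h))
    print(scripted_cell(x, h))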

beginner_source/chatbot_tutorial.py

Lines changed: 0 additions & 5 deletions

@@ -92,11 +92,6 @@
 # After that, let’s import some necessities.
 #
 
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
 import torch
 from torch.jit import script, trace
 import torch.nn as nn

beginner_source/data_loading_tutorial.py

Lines changed: 0 additions & 1 deletion

@@ -18,7 +18,6 @@
 
 """
 
-from __future__ import print_function, division
 import os
 import torch
 import pandas as pd

beginner_source/deploy_seq2seq_hybrid_frontend_tutorial.py

Lines changed: 0 additions & 5 deletions

@@ -101,11 +101,6 @@
 # maximum length output that the model is capable of producing.
 #
 
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
 import torch
 import torch.nn as nn
 import torch.nn.functional as F

beginner_source/fgsm_tutorial.py

Lines changed: 9 additions & 15 deletions

@@ -90,7 +90,6 @@
 # into the implementation.
 #
 
-from __future__ import print_function
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
@@ -99,13 +98,6 @@
 import numpy as np
 import matplotlib.pyplot as plt
 
-# NOTE: This is a hack to get around "User-agent" limitations when downloading MNIST datasets
-# see, https://github.com/pytorch/vision/issues/3497 for more information
-from six.moves import urllib
-opener = urllib.request.build_opener()
-opener.addheaders = [('User-agent', 'Mozilla/5.0')]
-urllib.request.install_opener(opener)
-
 
 ######################################################################
 # Implementation
@@ -141,6 +133,8 @@
 epsilons = [0, .05, .1, .15, .2, .25, .3]
 pretrained_model = "data/lenet_mnist_model.pth"
 use_cuda=True
+# Set random seed for reproducibility
+torch.manual_seed(42)
 
 
 ######################################################################
@@ -186,19 +180,19 @@ def forward(self, x):
 test_loader = torch.utils.data.DataLoader(
     datasets.MNIST('../data', train=False, download=True, transform=transforms.Compose([
         transforms.ToTensor(),
-        transforms.Normalize((0.1307,), (0.3081,))
+        transforms.Normalize((0.1307,), (0.3081,)),
     ])),
     batch_size=1, shuffle=True)
 
 # Define what device we are using
 print("CUDA Available: ",torch.cuda.is_available())
-device = torch.device("cuda" if (use_cuda and torch.cuda.is_available()) else "cpu")
+device = torch.device("cuda" if use_cuda and torch.cuda.is_available() else "cpu")
 
 # Initialize the network
 model = Net().to(device)
 
 # Load the pretrained model
-model.load_state_dict(torch.load(pretrained_model, map_location=device))
+model.load_state_dict(torch.load(pretrained_model, weights_only=True, map_location=device))
 
 # Set the model in evaluation mode. In this case this is for the Dropout layers
 model.eval()
@@ -324,7 +318,7 @@ def test( model, device, test_loader, epsilon ):
        if final_pred.item() == target.item():
            correct += 1
            # Special case for saving 0 epsilon examples
-           if (epsilon == 0) and (len(adv_examples) < 5):
+           if epsilon == 0 and len(adv_examples) < 5:
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append( (init_pred.item(), final_pred.item(), adv_ex) )
        else:
@@ -335,7 +329,7 @@ def test( model, device, test_loader, epsilon ):
 
    # Calculate final accuracy for this epsilon
    final_acc = correct/float(len(test_loader))
-   print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, len(test_loader), final_acc))
+   print(f"Epsilon: {epsilon}\tTest Accuracy = {correct} / {len(test_loader)} = {final_acc}")
 
    # Return the accuracy and an adversarial example
    return final_acc, adv_examples
@@ -421,9 +415,9 @@ def test( model, device, test_loader, epsilon ):
        plt.xticks([], [])
        plt.yticks([], [])
        if j == 0:
-           plt.ylabel("Eps: {}".format(epsilons[i]), fontsize=14)
+           plt.ylabel(f"Eps: {epsilons[i]}", fontsize=14)
        orig,adv,ex = examples[i][j]
-       plt.title("{} -> {}".format(orig, adv))
+       plt.title(f"{orig} -> {adv}")
        plt.imshow(ex, cmap="gray")
 plt.tight_layout()
 plt.show()
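
For reference, a minimal self-contained sketch of the checkpoint-loading pattern used above: seed the RNG, then load a state dict with ``weights_only=True`` so only tensor data is deserialized (the tiny model and ``checkpoint.pth`` path are placeholders, not the tutorial's LeNet):

    import torch
    import torch.nn as nn

    torch.manual_seed(42)  # fixed seed for reproducible results

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10)).to(device)

    # Round-trip a checkpoint; weights_only=True restricts torch.load to
    # plain tensors/state dicts instead of arbitrary pickled objects.
    torch.save(model.state_dict(), "checkpoint.pth")
    state_dict = torch.load("checkpoint.pth", map_location=device, weights_only=True)
    model.load_state_dict(state_dict)
    model.eval()  # evaluation mode, e.g. for Dropout layers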

beginner_source/transfer_learning_tutorial.py

Lines changed: 0 additions & 2 deletions

@@ -33,8 +33,6 @@
 # License: BSD
 # Author: Sasank Chilamkurthy
 
-from __future__ import print_function, division
-
 import torch
 import torch.nn as nn
 import torch.optim as optim

conf.py

Lines changed: 8 additions & 1 deletion

@@ -34,6 +34,7 @@
 import pytorch_sphinx_theme
 import torch
 import glob
+import random
 import shutil
 from custom_directives import IncludeDirective, GalleryItemDirective, CustomGalleryItemDirective, CustomCalloutItemDirective, CustomCardItemDirective
 import distutils.file_util
@@ -85,6 +86,11 @@
 
 # -- Sphinx-gallery configuration --------------------------------------------
 
+def reset_seeds(gallery_conf, fname):
+    torch.manual_seed(42)
+    torch.set_default_device(None)
+    random.seed(10)
+
 sphinx_gallery_conf = {
     'examples_dirs': ['beginner_source', 'intermediate_source',
                       'advanced_source', 'recipes_source', 'prototype_source'],
@@ -94,7 +100,8 @@
     'backreferences_dir': None,
     'first_notebook_cell': ("# For tips on running notebooks in Google Colab, see\n"
                             "# https://pytorch.org/tutorials/beginner/colab\n"
-                            "%matplotlib inline")
+                            "%matplotlib inline"),
+    'reset_modules': (reset_seeds)
 }
 
 if os.getenv('GALLERY_PATTERN'):
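
For context, a rough sketch of how a per-example reset callable such as ``reset_seeds`` plugs into sphinx-gallery: ``reset_modules`` takes a tuple of module names and/or callables, and each callable is invoked with ``(gallery_conf, fname)`` for every example script (the directory values below are placeholders, assuming a recent sphinx-gallery):

    import random
    import torch

    def reset_seeds(gallery_conf, fname):
        # Re-seed RNGs and clear the default device so one tutorial's
        # state does not leak into the next one that gets executed.
        torch.manual_seed(42)
        torch.set_default_device(None)
        random.seed(10)

    sphinx_gallery_conf = {
        "examples_dirs": ["beginner_source"],          # placeholder
        "gallery_dirs": ["beginner"],                  # placeholder
        "reset_modules": ("matplotlib", reset_seeds),  # names and/or callables
    }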

intermediate_source/char_rnn_classification_tutorial.py

Lines changed: 0 additions & 1 deletion

@@ -74,7 +74,6 @@
 ``{language: [names ...]}``. The generic variables "category" and "line"
 (for language and name in our case) are used for later extensibility.
 """
-from __future__ import unicode_literals, print_function, division
 from io import open
 import glob
 import os

intermediate_source/char_rnn_generation_tutorial.py

Lines changed: 0 additions & 1 deletion

@@ -75,7 +75,6 @@
 and end up with a dictionary ``{language: [names ...]}``.
 
 """
-from __future__ import unicode_literals, print_function, division
 from io import open
 import glob
 import os

intermediate_source/dynamic_quantization_bert_tutorial.rst

Lines changed: 2 additions & 17 deletions

@@ -92,8 +92,6 @@ In this step we import the necessary Python modules for the tutorial.
 
 .. code:: python
 
-    from __future__ import absolute_import, division, print_function
-
     import logging
     import numpy as np
     import os
@@ -255,8 +253,6 @@ model before and after the dynamic quantization.
     torch.manual_seed(seed)
     set_seed(42)
 
-    # Initialize a global random number generator
-    global_rng = random.Random()
 
 
 2.2 Load the fine-tuned BERT model
@@ -528,20 +524,9 @@ We can serialize and save the quantized model for the future use using
 
 .. code:: python
 
-    def ids_tensor(shape, vocab_size, rng=None, name=None):
+    def ids_tensor(shape, vocab_size):
        # Creates a random int32 tensor of the shape within the vocab size
-       if rng is None:
-           rng = global_rng
-
-       total_dims = 1
-       for dim in shape:
-           total_dims *= dim
-
-       values = []
-       for _ in range(total_dims):
-           values.append(rng.randint(0, vocab_size - 1))
-
-       return torch.tensor(data=values, dtype=torch.long, device='cpu').view(shape).contiguous()
+       return torch.randint(0, vocab_size, shape=shape, dtype=torch.int, device='cpu')
 
    input_ids = ids_tensor([8, 128], 2)
    token_type_ids = ids_tensor([8, 128], 2)
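
For reference, a minimal sketch of building random token-id tensors with ``torch.randint``, the approach the one-line ``ids_tensor`` above is based on; the tensor shape is passed as the ``size`` argument (the ``torch.long`` dtype and the shapes below are this sketch's choices, not necessarily the tutorial's):

    import torch

    def ids_tensor(shape, vocab_size):
        # Random integer ids in [0, vocab_size), e.g. for smoke-testing a model.
        return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long, device="cpu")

    input_ids = ids_tensor([8, 128], 2)      # batch of 8 sequences of length 128
    token_type_ids = ids_tensor([8, 128], 2)
    print(input_ids.shape, input_ids.dtype)  # torch.Size([8, 128]) torch.int64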

intermediate_source/mario_rl_tutorial.py

Lines changed: 7 additions & 4 deletions

@@ -53,6 +53,8 @@
 # Super Mario environment for OpenAI Gym
 import gym_super_mario_bros
 
+from tensordict import TensorDict
+from torchrl.data import TensorDictReplayBuffer, LazyMemmapStorage
 
 ######################################################################
 # RL Definitions
@@ -348,7 +350,7 @@ def act(self, state):
 class Mario(Mario):  # subclassing for continuity
     def __init__(self, state_dim, action_dim, save_dir):
         super().__init__(state_dim, action_dim, save_dir)
-        self.memory = deque(maxlen=100000)
+        self.memory = TensorDictReplayBuffer(storage=LazyMemmapStorage(100000))
         self.batch_size = 32
 
     def cache(self, state, next_state, action, reward, done):
@@ -373,14 +375,15 @@ def first_if_tuple(x):
         reward = torch.tensor([reward], device=self.device)
         done = torch.tensor([done], device=self.device)
 
-        self.memory.append((state, next_state, action, reward, done,))
+        # self.memory.append((state, next_state, action, reward, done,))
+        self.memory.add(TensorDict({"state": state, "next_state": next_state, "action": action, "reward": reward, "done": done}, batch_size=[]))
 
     def recall(self):
         """
         Retrieve a batch of experiences from memory
         """
-        batch = random.sample(self.memory, self.batch_size)
-        state, next_state, action, reward, done = map(torch.stack, zip(*batch))
+        batch = self.memory.sample(self.batch_size)
+        state, next_state, action, reward, done = (batch.get(key) for key in ("state", "next_state", "action", "reward", "done"))
         return state, next_state, action.squeeze(), reward.squeeze(), done.squeeze()
 
 
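
For context, a minimal self-contained sketch of the ``TensorDictReplayBuffer`` round trip that ``cache``/``recall`` above rely on (the tensor shapes are placeholders, not the tutorial's observation shapes):

    import torch
    from tensordict import TensorDict
    from torchrl.data import TensorDictReplayBuffer, LazyMemmapStorage

    # Memory-mapped storage keeps a large buffer out of process memory.
    buffer = TensorDictReplayBuffer(storage=LazyMemmapStorage(max_size=1000))

    # Add a single transition as a TensorDict (batch_size=[] marks one item).
    buffer.add(TensorDict({
        "state": torch.rand(4, 84, 84),
        "next_state": torch.rand(4, 84, 84),
        "action": torch.tensor([1]),
        "reward": torch.tensor([0.5]),
        "done": torch.tensor([False]),
    }, batch_size=[]))

    # Sampling returns a stacked TensorDict; fields come back via .get().
    batch = buffer.sample(1)
    state, action = batch.get("state"), batch.get("action")
    print(state.shape, action.shape)  # torch.Size([1, 4, 84, 84]) torch.Size([1, 1])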

intermediate_source/seq2seq_translation_tutorial.py

Lines changed: 3 additions & 4 deletions

@@ -45,7 +45,7 @@
    :alt:
 
 To improve upon this model we'll use an `attention
-mechanism <https://arxiv.org/abs/1409.0473>`__, which lets the decoder
+mechanism <https://arxiv.org/abs/1508.04025>`__, which lets the decoder
 learn to focus over a specific range of the input sequence.
 
 **Recommended Reading:**
@@ -66,8 +66,8 @@
   Statistical Machine Translation <https://arxiv.org/abs/1406.1078>`__
 - `Sequence to Sequence Learning with Neural
   Networks <https://arxiv.org/abs/1409.3215>`__
-- `Neural Machine Translation by Jointly Learning to Align and
-  Translate <https://arxiv.org/abs/1409.0473>`__
+- `Effective Approaches to Attention-based Neural Machine
+  Translation <https://arxiv.org/abs/1508.04025>`__
 - `A Neural Conversational Model <https://arxiv.org/abs/1506.05869>`__
 
 You will also find the previous tutorials on
@@ -78,7 +78,6 @@
 
 **Requirements**
 """
-from __future__ import unicode_literals, print_function, division
 from io import open
 import unicodedata
 import string

intermediate_source/spatial_transformer_tutorial.py

Lines changed: 0 additions & 1 deletion

@@ -27,7 +27,6 @@
 # License: BSD
 # Author: Ghassen Hamrouni
 
-from __future__ import print_function
 import torch
 import torch.nn as nn
 import torch.nn.functional as F

intermediate_source/torch_compile_tutorial.py

Lines changed: 3 additions & 3 deletions

@@ -69,7 +69,7 @@
 
 def foo(x, y):
     a = torch.sin(x)
-    b = torch.cos(x)
+    b = torch.cos(y)
     return a + b
 opt_foo1 = torch.compile(foo)
 print(opt_foo1(torch.randn(10, 10), torch.randn(10, 10)))
@@ -80,7 +80,7 @@ def foo(x, y):
 @torch.compile
 def opt_foo2(x, y):
     a = torch.sin(x)
-    b = torch.cos(x)
+    b = torch.cos(y)
     return a + b
 print(opt_foo2(torch.randn(10, 10), torch.randn(10, 10)))
 
@@ -105,7 +105,7 @@ def forward(self, x):
 #
 # Let's now demonstrate that using ``torch.compile`` can speed
 # up real models. We will compare standard eager mode and
-# ``torch.compile`` by evaluating and training ResNet-18 on random data.
+# ``torch.compile`` by evaluating and training a ``torchvision`` model on random data.
 #
 # Before we start, we need to define some utility functions.
 
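
For context, a rough sketch of comparing an eager ``torchvision`` model against its ``torch.compile`` counterpart on random data, in the spirit of the section described above (the model choice, batch size, and timing helper are placeholders):

    import time
    import torch
    import torchvision.models as models

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = models.resnet18(weights=None).to(device)
    opt_model = torch.compile(model)

    x = torch.randn(16, 3, 224, 224, device=device)

    def bench(fn):
        fn(x)  # warm-up; the first compiled call includes compilation time
        if device == "cuda":
            torch.cuda.synchronize()
        start = time.time()
        fn(x)
        if device == "cuda":
            torch.cuda.synchronize()
        return time.time() - start

    with torch.no_grad():
        print("eager:   ", bench(model))
        print("compiled:", bench(opt_model))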
