
Commit 22cd08a

Merge branch 'main' into add_amx_doc
2 parents 1ae03c7 + 730029b

6 files changed: 23 additions (+), 51 deletions (-)

advanced_source/rpc_ddp_tutorial.rst

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 Combining Distributed DataParallel with Distributed RPC Framework
 =================================================================
-**Authors**: `Pritam Damania <https://github.com/pritamdamania87>`_ and `Yi Wang <https://github.com/SciPioneer>`_
+**Authors**: `Pritam Damania <https://github.com/pritamdamania87>`_ and `Yi Wang <https://github.com/wayi1>`_
 
 .. note::
    |edit| View and edit this tutorial in `github <https://github.com/pytorch/tutorials/blob/main/advanced_source/rpc_ddp_tutorial.rst>`__.

beginner_source/fgsm_tutorial.py

Lines changed: 9 additions & 14 deletions
@@ -98,13 +98,6 @@
 import numpy as np
 import matplotlib.pyplot as plt
 
-# NOTE: This is a hack to get around "User-agent" limitations when downloading MNIST datasets
-# see, https://github.com/pytorch/vision/issues/3497 for more information
-from six.moves import urllib
-opener = urllib.request.build_opener()
-opener.addheaders = [('User-agent', 'Mozilla/5.0')]
-urllib.request.install_opener(opener)
-
 
 ######################################################################
 # Implementation
@@ -140,6 +133,8 @@
 epsilons = [0, .05, .1, .15, .2, .25, .3]
 pretrained_model = "data/lenet_mnist_model.pth"
 use_cuda=True
+# Set random seed for reproducibility
+torch.manual_seed(42)
 
 
 ######################################################################
@@ -178,18 +173,18 @@ def forward(self, x):
 test_loader = torch.utils.data.DataLoader(
     datasets.MNIST('../data', train=False, download=True, transform=transforms.Compose([
             transforms.ToTensor(),
-            ])),
+        ])),
     batch_size=1, shuffle=True)
 
 # Define what device we are using
 print("CUDA Available: ",torch.cuda.is_available())
-device = torch.device("cuda" if (use_cuda and torch.cuda.is_available()) else "cpu")
+device = torch.device("cuda" if use_cuda and torch.cuda.is_available() else "cpu")
 
 # Initialize the network
 model = Net().to(device)
 
 # Load the pretrained model
-model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))
+model.load_state_dict(torch.load(pretrained_model, weights_only=True, map_location='cpu'))
 
 # Set the model in evaluation mode. In this case this is for the Dropout layers
 model.eval()
@@ -289,7 +284,7 @@ def test( model, device, test_loader, epsilon ):
         if final_pred.item() == target.item():
             correct += 1
             # Special case for saving 0 epsilon examples
-            if (epsilon == 0) and (len(adv_examples) < 5):
+            if epsilon == 0 and len(adv_examples) < 5:
                 adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                 adv_examples.append( (init_pred.item(), final_pred.item(), adv_ex) )
         else:
@@ -300,7 +295,7 @@ def test( model, device, test_loader, epsilon ):
 
     # Calculate final accuracy for this epsilon
    final_acc = correct/float(len(test_loader))
-    print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, len(test_loader), final_acc))
+    print(f"Epsilon: {epsilon}\tTest Accuracy = {correct} / {len(test_loader)} = {final_acc}")
 
     # Return the accuracy and an adversarial example
     return final_acc, adv_examples
@@ -386,9 +381,9 @@ def test( model, device, test_loader, epsilon ):
         plt.xticks([], [])
         plt.yticks([], [])
         if j == 0:
-            plt.ylabel("Eps: {}".format(epsilons[i]), fontsize=14)
+            plt.ylabel(f"Eps: {epsilons[i]}", fontsize=14)
         orig,adv,ex = examples[i][j]
-        plt.title("{} -> {}".format(orig, adv))
+        plt.title(f"{orig} -> {adv}")
         plt.imshow(ex, cmap="gray")
 plt.tight_layout()
 plt.show()
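
A note on the `torch.load` change: passing `weights_only=True` restricts unpickling to tensors and primitive containers rather than arbitrary Python objects, so an untrusted checkpoint cannot execute code on load. A minimal sketch of the pattern (the checkpoint path here is illustrative, not the tutorial's):

    import torch

    # Saving a plain state_dict keeps the checkpoint loadable under weights_only=True.
    model = torch.nn.Linear(4, 2)
    torch.save(model.state_dict(), "checkpoint.pth")  # illustrative path

    # weights_only=True refuses to unpickle arbitrary objects, guarding
    # against code execution from untrusted checkpoint files.
    state = torch.load("checkpoint.pth", weights_only=True, map_location="cpu")
    model.load_state_dict(state)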

intermediate_source/dynamic_quantization_bert_tutorial.rst

Lines changed: 2 additions & 15 deletions
@@ -253,8 +253,6 @@ model before and after the dynamic quantization.
     torch.manual_seed(seed)
     set_seed(42)
 
-    # Initialize a global random number generator
-    global_rng = random.Random()
 
 
@@ -526,20 +524,9 @@ We can serialize and save the quantized model for the future use using
 
 .. code:: python
 
-    def ids_tensor(shape, vocab_size, rng=None, name=None):
+    def ids_tensor(shape, vocab_size):
         # Creates a random int32 tensor of the shape within the vocab size
-        if rng is None:
-            rng = global_rng
-
-        total_dims = 1
-        for dim in shape:
-            total_dims *= dim
-
-        values = []
-        for _ in range(total_dims):
-            values.append(rng.randint(0, vocab_size - 1))
-
-        return torch.tensor(data=values, dtype=torch.long, device='cpu').view(shape).contiguous()
+        return torch.randint(0, vocab_size, size=shape, dtype=torch.int, device='cpu')
 
     input_ids = ids_tensor([8, 128], 2)
     token_type_ids = ids_tensor([8, 128], 2)
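
A note on the replacement: the old helper drew each value with `rng.randint(0, vocab_size - 1)`, which is inclusive on both ends, while `torch.randint`'s upper bound is exclusive, so `torch.randint(0, vocab_size, ...)` samples the same range in one vectorized call. A sketch of the new helper in isolation (shapes chosen arbitrarily):

    import torch

    def ids_tensor(shape, vocab_size):
        # Vectorized draw of random token IDs in [0, vocab_size).
        return torch.randint(0, vocab_size, size=shape, dtype=torch.int, device='cpu')

    ids = ids_tensor([8, 128], vocab_size=2)
    print(ids.shape, ids.dtype)  # torch.Size([8, 128]) torch.int32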

intermediate_source/mario_rl_tutorial.py

Lines changed: 7 additions & 4 deletions
@@ -53,6 +53,8 @@
 # Super Mario environment for OpenAI Gym
 import gym_super_mario_bros
 
+from tensordict import TensorDict
+from torchrl.data import TensorDictReplayBuffer, LazyMemmapStorage
 
 ######################################################################
 # RL Definitions
@@ -348,7 +350,7 @@ def act(self, state):
 class Mario(Mario):  # subclassing for continuity
     def __init__(self, state_dim, action_dim, save_dir):
         super().__init__(state_dim, action_dim, save_dir)
-        self.memory = deque(maxlen=100000)
+        self.memory = TensorDictReplayBuffer(storage=LazyMemmapStorage(100000))
         self.batch_size = 32
 
     def cache(self, state, next_state, action, reward, done):
@@ -373,14 +375,15 @@ def first_if_tuple(x):
         reward = torch.tensor([reward], device=self.device)
         done = torch.tensor([done], device=self.device)
 
-        self.memory.append((state, next_state, action, reward, done,))
+        # self.memory.append((state, next_state, action, reward, done,))
+        self.memory.add(TensorDict({"state": state, "next_state": next_state, "action": action, "reward": reward, "done": done}, batch_size=[]))
 
     def recall(self):
         """
         Retrieve a batch of experiences from memory
         """
-        batch = random.sample(self.memory, self.batch_size)
-        state, next_state, action, reward, done = map(torch.stack, zip(*batch))
+        batch = self.memory.sample(self.batch_size)
+        state, next_state, action, reward, done = (batch.get(key) for key in ("state", "next_state", "action", "reward", "done"))
         return state, next_state, action.squeeze(), reward.squeeze(), done.squeeze()
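
A note on the buffer swap: `TensorDictReplayBuffer` with `LazyMemmapStorage` keeps experiences in memory-mapped files rather than an in-RAM `deque`, and replaces `random.sample` with the buffer's own `sample()`. A minimal, self-contained sketch of the same add/sample pattern (toy shapes, not the tutorial's observations):

    import torch
    from tensordict import TensorDict
    from torchrl.data import TensorDictReplayBuffer, LazyMemmapStorage

    # Storage is memory-mapped to disk, so capacity isn't bounded by RAM.
    buffer = TensorDictReplayBuffer(storage=LazyMemmapStorage(1000))

    for step in range(100):
        # One transition per TensorDict; batch_size=[] marks a single item.
        buffer.add(TensorDict({
            "state": torch.randn(4),
            "action": torch.tensor([step % 2]),
            "reward": torch.randn(1),
        }, batch_size=[]))

    batch = buffer.sample(32)    # TensorDict with batch_size=[32]
    states = batch.get("state")  # shape: torch.Size([32, 4])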

intermediate_source/torch_compile_tutorial.py

Lines changed: 2 additions & 2 deletions
@@ -69,7 +69,7 @@
 
 def foo(x, y):
     a = torch.sin(x)
-    b = torch.cos(x)
+    b = torch.cos(y)
     return a + b
 opt_foo1 = torch.compile(foo)
 print(opt_foo1(torch.randn(10, 10), torch.randn(10, 10)))
@@ -80,7 +80,7 @@ def foo(x, y):
 @torch.compile
 def opt_foo2(x, y):
     a = torch.sin(x)
-    b = torch.cos(x)
+    b = torch.cos(y)
     return a + b
 print(opt_foo2(torch.randn(10, 10), torch.randn(10, 10)))
 
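
A note on the fix: with `torch.cos(x)` the second argument was dead code, so the example never actually demonstrated a two-input function. A quick sanity check that the compiled function matches eager execution (a sketch; assumes a PyTorch build with `torch.compile` support):

    import torch

    def foo(x, y):
        a = torch.sin(x)
        b = torch.cos(y)
        return a + b

    opt_foo = torch.compile(foo)
    x, y = torch.randn(10, 10), torch.randn(10, 10)
    # Compiled and eager results should agree to floating-point tolerance.
    print(torch.allclose(foo(x, y), opt_foo(x, y), atol=1e-5))  # True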

prototype_source/graph_mode_dynamic_bert_tutorial.rst

Lines changed: 2 additions & 15 deletions
@@ -60,22 +60,9 @@ Once all the necesessary packages are downloaded and installed we setup the code
     from torch.quantization import per_channel_dynamic_qconfig
     from torch.quantization import quantize_dynamic_jit
 
-    global_rng = random.Random()
-
-    def ids_tensor(shape, vocab_size, rng=None, name=None):
+    def ids_tensor(shape, vocab_size):
         # Creates a random int32 tensor of the shape within the vocab size
-        if rng is None:
-            rng = global_rng
-
-        total_dims = 1
-        for dim in shape:
-            total_dims *= dim
-
-        values = []
-        for _ in range(total_dims):
-            values.append(rng.randint(0, vocab_size - 1))
-
-        return torch.tensor(data=values, dtype=torch.long, device='cpu').view(shape).contiguous()
+        return torch.randint(0, vocab_size, size=shape, dtype=torch.int, device='cpu')
 
     # Setup logging
     logger = logging.getLogger(__name__)
