
Commit 178aa6e

Author: Svetlana Karslioglu
Merge branch 'master' into fix-cards-60min-blitz
2 parents ff7c19e + f007848, commit 178aa6e

12 files changed: +80 additions, -55 deletions

.jenkins/validate_tutorials_built.py

Lines changed: 0 additions & 2 deletions
@@ -22,9 +22,7 @@
     "former_torchies/tensor_tutorial_old",
     "examples_autograd/polynomial_autograd",
     "examples_autograd/polynomial_custom_function",
-    "forward_ad_usage",
     "parametrizations",
-    "reinforcement_q_learning",
     "mnist_train_nas",  # used by ax_multiobjective_nas_tutorial.py
     "fx_conv_bn_fuser",
     "super_resolution_with_onnxruntime",

beginner_source/basics/autogradqs_tutorial.py

Lines changed: 5 additions & 5 deletions
@@ -203,14 +203,14 @@
 # compute the product:
 #

-inp = torch.eye(5, requires_grad=True)
-out = (inp+1).pow(2)
-out.backward(torch.ones_like(inp), retain_graph=True)
+inp = torch.eye(4, 5, requires_grad=True)
+out = (inp+1).pow(2).t()
+out.backward(torch.ones_like(out), retain_graph=True)
 print(f"First call\n{inp.grad}")
-out.backward(torch.ones_like(inp), retain_graph=True)
+out.backward(torch.ones_like(out), retain_graph=True)
 print(f"\nSecond call\n{inp.grad}")
 inp.grad.zero_()
-out.backward(torch.ones_like(inp), retain_graph=True)
+out.backward(torch.ones_like(out), retain_graph=True)
 print(f"\nCall after zeroing gradients\n{inp.grad}")
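Context for this change: out.backward(v) computes a vector-Jacobian product, so v must match the shape of out, not the shape of inp. Once inp is the non-square 4x5 identity and out is its 5x4 transpose, the old torch.ones_like(inp) argument would raise a shape mismatch. A minimal standalone sketch of the corrected call pattern (not taken from the tutorial itself):

import torch

inp = torch.eye(4, 5, requires_grad=True)   # non-square, so inp and out shapes now differ
out = (inp + 1).pow(2).t()                  # shape (5, 4)

# backward(v) computes the vector-Jacobian product v^T J; v must match out's shape.
out.backward(torch.ones_like(out), retain_graph=True)
print(inp.grad.shape)                       # torch.Size([4, 5]); gradients accumulate on inp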

beginner_source/basics/buildmodel_tutorial.py

Lines changed: 1 addition & 1 deletion
@@ -77,7 +77,7 @@ def forward(self, x):
 # along with some `background operations <https://github.com/pytorch/pytorch/blob/270111b7b611d174967ed204776985cefca9c144/torch/nn/modules/module.py#L866>`_.
 # Do not call ``model.forward()`` directly!
 #
-# Calling the model on the input returns a 10-dimensional tensor with raw predicted values for each class.
+# Calling the model on the input returns a 2-dimensional tensor with dim=0 corresponding to each output of 10 raw predicted values for each class, and dim=1 corresponding to the individual values of each output.
 # We get the prediction probabilities by passing it through an instance of the ``nn.Softmax`` module.

 X = torch.rand(1, 28, 28, device=device)
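Context for the reworded sentence: calling the model on a single 28x28 image produces logits of shape (1, 10), where dim 0 indexes the samples in the batch and dim 1 holds the ten per-class scores, which nn.Softmax(dim=1) turns into probabilities. A minimal sketch with a stand-in model (the tutorial uses its own NeuralNetwork class):

import torch
from torch import nn

# Stand-in for the tutorial's NeuralNetwork: flatten a 28x28 image, map it to 10 class scores.
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))

X = torch.rand(1, 28, 28)
logits = model(X)                        # shape (1, 10): dim 0 = batch, dim 1 = class scores
pred_probab = nn.Softmax(dim=1)(logits)  # probabilities over the 10 classes
y_pred = pred_probab.argmax(1)
print(logits.shape, y_pred)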

beginner_source/basics/quickstart_tutorial.py

Lines changed: 1 addition & 1 deletion
@@ -93,7 +93,7 @@
 # Define model
 class NeuralNetwork(nn.Module):
     def __init__(self):
-        super(NeuralNetwork, self).__init__()
+        super().__init__()
         self.flatten = nn.Flatten()
         self.linear_relu_stack = nn.Sequential(
             nn.Linear(28*28, 512),

beginner_source/fgsm_tutorial.py

Lines changed: 1 addition & 1 deletion
@@ -17,7 +17,7 @@
 machine learning. You may be surprised to find that adding imperceptible
 perturbations to an image *can* cause drastically different model
 performance. Given that this is a tutorial, we will explore the topic
-via example on an image classifier. Specifically we will use one of the
+via example on an image classifier. Specifically, we will use one of the
 first and most popular attack methods, the Fast Gradient Sign Attack
 (FGSM), to fool an MNIST classifier.

beginner_source/transformer_tutorial.py

Lines changed: 5 additions & 0 deletions
@@ -134,6 +134,11 @@ def forward(self, x: Tensor) -> Tensor:
 ######################################################################
 # This tutorial uses ``torchtext`` to generate Wikitext-2 dataset.
 # To access torchtext datasets, please install torchdata following instructions at https://github.com/pytorch/data.
+# %%
+# .. code-block:: bash
+#
+#    %%bash
+#    pip install torchdata
 #
 # The vocab object is built based on the train dataset and is used to numericalize
 # tokens into tensors. Wikitext-2 represents rare tokens as `<unk>`.

beginner_source/translation_transformer.py

Lines changed: 2 additions & 2 deletions
@@ -309,7 +309,7 @@ def train_epoch(model, optimizer):
         optimizer.step()
         losses += loss.item()

-    return losses / len(train_dataloader)
+    return losses / len(list(train_dataloader))


 def evaluate(model):
@@ -333,7 +333,7 @@ def evaluate(model):
         loss = loss_fn(logits.reshape(-1, logits.shape[-1]), tgt_out.reshape(-1))
         losses += loss.item()

-    return losses / len(val_dataloader)
+    return losses / len(list(val_dataloader))

 ######################################################################
 # Now we have all the ingredients to train our model. Let's do it!
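Context for the len(list(...)) change: the newer torchtext datasets are iterable-style, so the wrapping DataLoader no longer implements __len__; materializing it with list() restores a batch count, at the price of holding every batch in memory. A lighter alternative, sketched under the assumption that only the average per-batch loss is needed (loss_for_batch is a hypothetical stand-in for the forward/backward step, not part of the tutorial):

def average_epoch_loss(dataloader, loss_for_batch):
    # Count batches while iterating instead of materializing the whole dataloader up front.
    total, num_batches = 0.0, 0
    for batch in dataloader:
        total += loss_for_batch(batch)   # hypothetical per-batch training step returning a float
        num_batches += 1
    return total / num_batches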

intermediate_source/mario_rl_tutorial.py

Lines changed: 31 additions & 34 deletions
@@ -31,8 +31,10 @@
 ######################################################################
 #
 #
-
-# !pip install gym-super-mario-bros==7.3.0
+# .. code-block:: bash
+#
+#    %%bash
+#    pip install gym-super-mario-bros==7.4.0

 import torch
 from torch import nn
@@ -95,16 +97,19 @@
 # (next) state, reward and other info.
 #

-# Initialize Super Mario environment
-env = gym_super_mario_bros.make("SuperMarioBros-1-1-v0")
+# Initialize Super Mario environment (in v0.26 change render mode to 'human' to see results on the screen)
+if gym.__version__ < '0.26':
+    env = gym_super_mario_bros.make("SuperMarioBros-1-1-v0", new_step_api=True)
+else:
+    env = gym_super_mario_bros.make("SuperMarioBros-1-1-v0", render_mode='rgb', apply_api_compatibility=True)

 # Limit the action-space to
 #   0. walk right
 #   1. jump right
 env = JoypadSpace(env, [["right"], ["right", "A"]])

 env.reset()
-next_state, reward, done, info = env.step(action=0)
+next_state, reward, done, trunc, info = env.step(action=0)
 print(f"{next_state.shape},\n {reward},\n {done},\n {info}")
@@ -151,14 +156,13 @@ def __init__(self, env, skip):
     def step(self, action):
         """Repeat action, and sum reward"""
         total_reward = 0.0
-        done = False
         for i in range(self._skip):
             # Accumulate reward and repeat the same action
-            obs, reward, done, info = self.env.step(action)
+            obs, reward, done, trunk, info = self.env.step(action)
             total_reward += reward
             if done:
                 break
-        return obs, total_reward, done, info
+        return obs, total_reward, done, trunk, info


 class GrayScaleObservation(gym.ObservationWrapper):
@@ -203,7 +207,10 @@ def observation(self, observation):
 env = SkipFrame(env, skip=4)
 env = GrayScaleObservation(env)
 env = ResizeObservation(env, shape=84)
-env = FrameStack(env, num_stack=4)
+if gym.__version__ < '0.26':
+    env = FrameStack(env, num_stack=4, new_step_api=True)
+else:
+    env = FrameStack(env, num_stack=4)


 ######################################################################
@@ -283,12 +290,11 @@ def __init__(self, state_dim, action_dim, save_dir):
         self.action_dim = action_dim
         self.save_dir = save_dir

-        self.use_cuda = torch.cuda.is_available()
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"

         # Mario's DNN to predict the most optimal action - we implement this in the Learn section
         self.net = MarioNet(self.state_dim, self.action_dim).float()
-        if self.use_cuda:
-            self.net = self.net.to(device="cuda")
+        self.net = self.net.to(device=self.device)

         self.exploration_rate = 1
         self.exploration_rate_decay = 0.99999975
@@ -312,12 +318,8 @@ def act(self, state):

         # EXPLOIT
         else:
-            state = state.__array__()
-            if self.use_cuda:
-                state = torch.tensor(state).cuda()
-            else:
-                state = torch.tensor(state)
-            state = state.unsqueeze(0)
+            state = state[0].__array__() if isinstance(state, tuple) else state.__array__()
+            state = torch.tensor(state, device=self.device).unsqueeze(0)
             action_values = self.net(state, model="online")
             action_idx = torch.argmax(action_values, axis=1).item()

@@ -363,21 +365,16 @@ def cache(self, state, next_state, action, reward, done):
         reward (float),
         done(bool))
         """
-        state = state.__array__()
-        next_state = next_state.__array__()
-
-        if self.use_cuda:
-            state = torch.tensor(state).cuda()
-            next_state = torch.tensor(next_state).cuda()
-            action = torch.tensor([action]).cuda()
-            reward = torch.tensor([reward]).cuda()
-            done = torch.tensor([done]).cuda()
-        else:
-            state = torch.tensor(state)
-            next_state = torch.tensor(next_state)
-            action = torch.tensor([action])
-            reward = torch.tensor([reward])
-            done = torch.tensor([done])
+        def first_if_tuple(x):
+            return x[0] if isinstance(x, tuple) else x
+        state = first_if_tuple(state).__array__()
+        next_state = first_if_tuple(next_state).__array__()
+
+        state = torch.tensor(state, device=self.device)
+        next_state = torch.tensor(next_state, device=self.device)
+        action = torch.tensor([action], device=self.device)
+        reward = torch.tensor([reward], device=self.device)
+        done = torch.tensor([done], device=self.device)

         self.memory.append((state, next_state, action, reward, done,))

@@ -753,7 +750,7 @@ def record(self, episode, epsilon, step):
         action = mario.act(state)

         # Agent performs action
-        next_state, reward, done, trunc, info = env.step(action)
+        next_state, reward, done, trunc, info = env.step(action)

         # Remember
         mario.cache(state, next_state, action, reward, done)
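Context for these edits: they track the Gym 0.26 API change, in which step() returns five values (obs, reward, terminated, truncated, info) instead of four and reset() returns an (obs, info) tuple; that is why the wrappers and the training loop unpack an extra trunc/trunk value and why state may arrive as a tuple. A minimal sketch of the new call shapes, assuming gym >= 0.26 with the classic-control extras installed (not part of the tutorial):

import gym

env = gym.make("CartPole-v1")

# gym >= 0.26: reset() returns (observation, info) rather than just the observation.
obs, info = env.reset()

# gym >= 0.26: step() returns five values; the old "done" flag splits into terminated and truncated.
obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
done = terminated or truncated
env.close()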

intermediate_source/reinforcement_q_learning.py

Lines changed: 10 additions & 2 deletions
@@ -46,7 +46,12 @@

 First, let's import needed packages. Firstly, we need
 `gym <https://github.com/openai/gym>`__ for the environment
-(Install using `pip install gym`).
+
+.. code-block:: bash
+
+   %%bash
+   pip3 install gym[classic_control]
+
 We'll also use the following from PyTorch:

 -  neural networks (``torch.nn``)
@@ -74,7 +79,10 @@
 import torchvision.transforms as T


-env = gym.make('CartPole-v0', new_step_api=True, render_mode='single_rgb_array').unwrapped
+if gym.__version__ < '0.26':
+    env = gym.make('CartPole-v0', new_step_api=True, render_mode='single_rgb_array').unwrapped
+else:
+    env = gym.make('CartPole-v0', render_mode='rgb_array').unwrapped

 # set up matplotlib
 is_ipython = 'inline' in matplotlib.get_backend()
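Context for the version branch: since gym 0.26 the render mode is fixed when the environment is constructed, and render() takes no mode argument and simply returns an RGB frame. A minimal sketch, assuming gym >= 0.26 (not part of the tutorial):

import gym

# The render mode is chosen at construction time; render() then returns an H x W x 3 uint8 array.
env = gym.make("CartPole-v1", render_mode="rgb_array")
env.reset()
frame = env.render()
print(frame.shape)   # e.g. (400, 600, 3) for CartPole
env.close()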

intermediate_source/text_to_speech_with_torchaudio.py

Lines changed: 19 additions & 3 deletions
@@ -293,12 +293,28 @@ def text_to_sequence(text):
 # Waveglow
 # ~~~~~~~~
 #
-# Waveglow is a vocoder published by Nvidia. The pretrained weight is
-# publishe on Torch Hub. One can instantiate the model using ``torch.hub``
+# Waveglow is a vocoder published by Nvidia. The pretrained weights are
+# published on Torch Hub. One can instantiate the model using ``torch.hub``
 # module.
 #
+if torch.cuda.is_available():
+    waveglow = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_waveglow', model_math='fp32')
+else:
+    # Workaround to load model mapped on GPU
+    # https://stackoverflow.com/a/61840832
+    waveglow = torch.hub.load(
+        "NVIDIA/DeepLearningExamples:torchhub",
+        "nvidia_waveglow",
+        model_math="fp32",
+        pretrained=False,
+    )
+    checkpoint = torch.hub.load_state_dict_from_url(
+        "https://api.ngc.nvidia.com/v2/models/nvidia/waveglow_ckpt_fp32/versions/19.09.0/files/nvidia_waveglowpyt_fp32_20190427",
+        progress=False,
+        map_location=device,
+    )
+    state_dict = {key.replace("module.", ""): value for key, value in checkpoint["state_dict"].items()}

-waveglow = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_waveglow', model_math='fp32')
 waveglow = waveglow.remove_weightnorm(waveglow)
 waveglow = waveglow.to(device)
 waveglow.eval()
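Context for the CPU branch: the published WaveGlow checkpoint was saved from a GPU process, so map_location remaps the stored tensors onto the current device at load time, and the "module." prefix added by DataParallel has to be stripped before the keys match the bare model. The same pattern with a plain torch.load, sketched with a hypothetical checkpoint path (not part of the tutorial):

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# map_location remaps tensors saved on GPU onto whatever device is available here.
checkpoint = torch.load("waveglow_checkpoint.pth", map_location=device)  # hypothetical file name

# DataParallel prefixes every parameter key with "module."; strip it so load_state_dict can match names.
state_dict = {k.replace("module.", ""): v for k, v in checkpoint["state_dict"].items()}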

recipes_source/intel_extension_for_pytorch.rst

Lines changed: 2 additions & 2 deletions
@@ -98,7 +98,7 @@ Float32
    model.set_state_dict(torch.load(PATH))
    optimizer.set_state_dict(torch.load(PATH))
    # Invoke optimize function against the model object and optimizer object
-   model, optimizer = ipex.optimize(model, optimizer, dtype=torch.float32)
+   model, optimizer = ipex.optimize(model, optimizer=optimizer)

    for images, label in train_loader():
        # Setting memory_format to torch.channels_last could improve performance with 4D input data. This is optional.
@@ -131,7 +131,7 @@ BFloat16
    model.set_state_dict(torch.load(PATH))
    optimizer.set_state_dict(torch.load(PATH))
    # Invoke optimize function against the model object and optimizer object with data type set to torch.bfloat16
-   model, optimizer = ipex.optimize(model, optimizer, dtype=torch.bfloat16)
+   model, optimizer = ipex.optimize(model, optimizer=optimizer, dtype=torch.bfloat16)

    for images, label in train_loader():
        with torch.cpu.amp.autocast():

requirements.txt

Lines changed: 3 additions & 2 deletions
@@ -43,6 +43,7 @@ scikit-image
 scipy
 pillow==9.0.1
 wget
-gym==0.24.0
-gym-super-mario-bros==7.3.0
+gym==0.25.1
+gym-super-mario-bros==7.4.0
 timm
+pygame==2.1.2
