
Commit 3aa0743

Author: Svetlana Karslioglu
Merge branch 'main' into svekars-patch-9
2 parents 10fb7a8 + ea0a11c, commit 3aa0743

File tree

11 files changed, +546 -92 lines

Makefile

Lines changed: 1 addition & 1 deletion
@@ -82,7 +82,7 @@ download:
 	tar $(TAROPTS) -xzf $(DATADIR)/UrbanSound8K.tar.gz -C ./beginner_source/data/
 
 	# Download model for beginner_source/fgsm_tutorial.py
-	wget -nv -N https://s3.amazonaws.com/pytorch-tutorial-assets/lenet_mnist_model.pth -P $(DATADIR)
+	wget -nv -N 'https://docs.google.com/uc?export=download&id=1HJV2nUHJqclXQ8flKvcWmjZ-OU5DGatl' -O $(DATADIR)/lenet_mnist_model.pth
 	cp $(DATADIR)/lenet_mnist_model.pth ./beginner_source/data/lenet_mnist_model.pth
 
 	# Download model for advanced_source/dynamic_quantization_tutorial.py

advanced_source/neural_style_tutorial.py

Lines changed: 7 additions & 6 deletions
@@ -14,7 +14,7 @@
 developed by Leon A. Gatys, Alexander S. Ecker and Matthias Bethge.
 Neural-Style, or Neural-Transfer, allows you to take an image and
 reproduce it with a new artistic style. The algorithm takes three images,
-an input image, a content-image, and a style-image, and changes the input
+an input image, a content-image, and a style-image, and changes the input
 to resemble the content of the content-image and the artistic style of the style-image.
 
 
@@ -70,6 +70,7 @@
 # method is used to move tensors or modules to a desired device.
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+torch.set_default_device(device)
 
 ######################################################################
 # Loading the Images
@@ -261,7 +262,7 @@ def forward(self, input):
 # network to evaluation mode using ``.eval()``.
 #
 
-cnn = models.vgg19(pretrained=True).features.to(device).eval()
+cnn = models.vgg19(pretrained=True).features.eval()
 
 
 
@@ -271,8 +272,8 @@ def forward(self, input):
 # We will use them to normalize the image before sending it into the network.
 #
 
-cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
-cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)
+cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406])
+cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225])
 
 # create a module to normalize input image so we can easily put it in a
 # ``nn.Sequential``
@@ -308,7 +309,7 @@ def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
                                content_layers=content_layers_default,
                                style_layers=style_layers_default):
     # normalization module
-    normalization = Normalization(normalization_mean, normalization_std).to(device)
+    normalization = Normalization(normalization_mean, normalization_std)
 
     # just in order to have an iterable access to or list of content/style
     # losses
@@ -373,7 +374,7 @@ def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
 #
 # ::
 #
-#    input_img = torch.randn(content_img.data.size(), device=device)
+#    input_img = torch.randn(content_img.data.size())
 
 # add the original input image to the figure:
 plt.figure()
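
Context for reviewers: the pattern in this file replaces per-tensor `.to(device)` calls with a single `torch.set_default_device(device)` call (available since PyTorch 2.0), which routes tensor factory functions and newly constructed module parameters to that device. A minimal sketch of the behavior the diff relies on, not part of the commit:

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.set_default_device(device)

# Factory calls now allocate on `device` with no explicit .to(device)
mean = torch.tensor([0.485, 0.456, 0.406])
noise = torch.randn(3, 128, 128)
print(mean.device, noise.device)  # both report the default device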

beginner_source/examples_autograd/polynomial_autograd.py

Lines changed: 7 additions & 7 deletions
@@ -18,23 +18,23 @@
 import math
 
 dtype = torch.float
-device = torch.device("cpu")
-# device = torch.device("cuda:0")  # Uncomment this to run on GPU
+device = "cuda" if torch.cuda.is_available() else "cpu"
+torch.set_default_device(device)
 
 # Create Tensors to hold input and outputs.
 # By default, requires_grad=False, which indicates that we do not need to
 # compute gradients with respect to these Tensors during the backward pass.
-x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
+x = torch.linspace(-math.pi, math.pi, 2000, dtype=dtype)
 y = torch.sin(x)
 
 # Create random Tensors for weights. For a third order polynomial, we need
 # 4 weights: y = a + b x + c x^2 + d x^3
 # Setting requires_grad=True indicates that we want to compute gradients with
 # respect to these Tensors during the backward pass.
-a = torch.randn((), device=device, dtype=dtype, requires_grad=True)
-b = torch.randn((), device=device, dtype=dtype, requires_grad=True)
-c = torch.randn((), device=device, dtype=dtype, requires_grad=True)
-d = torch.randn((), device=device, dtype=dtype, requires_grad=True)
+a = torch.randn((), dtype=dtype, requires_grad=True)
+b = torch.randn((), dtype=dtype, requires_grad=True)
+c = torch.randn((), dtype=dtype, requires_grad=True)
+d = torch.randn((), dtype=dtype, requires_grad=True)
 
 learning_rate = 1e-6
 for t in range(2000):
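
The hunk stops at the top of the training loop; the loop body is not touched by this commit. For orientation only, a condensed sketch of the standard pattern that follows in this tutorial (forward pass, backward pass, manual weight update), not the exact upstream code:

for t in range(2000):
    # Forward pass: third-order polynomial y = a + b x + c x^2 + d x^3
    y_pred = a + b * x + c * x ** 2 + d * x ** 3

    # Sum-of-squares loss; backward() fills a.grad .. d.grad
    loss = (y_pred - y).pow(2).sum()
    loss.backward()

    # Manual gradient-descent step, kept outside autograd with no_grad()
    with torch.no_grad():
        for w in (a, b, c, d):
            w -= learning_rate * w.grad
            w.grad = None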

beginner_source/fgsm_tutorial.py

Lines changed: 50 additions & 16 deletions
@@ -123,7 +123,7 @@
 # - ``pretrained_model`` - path to the pretrained MNIST model which was
 #   trained with
 #   `pytorch/examples/mnist <https://github.com/pytorch/examples/tree/master/mnist>`__.
-#   For simplicity, download the pretrained model `here <https://drive.google.com/drive/folders/1fn83DF14tWmit0RTKWRhPq5uVXt73e0h?usp=sharing>`__.
+#   For simplicity, download the pretrained model `here <https://drive.google.com/file/d/1HJV2nUHJqclXQ8flKvcWmjZ-OU5DGatl/view?usp=drive_link>`__.
 #
 # - ``use_cuda`` - boolean flag to use CUDA if desired and available.
 #   Note, a GPU with CUDA is not critical for this tutorial as a CPU will
@@ -154,26 +154,34 @@
 class Net(nn.Module):
     def __init__(self):
         super(Net, self).__init__()
-        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
-        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
-        self.conv2_drop = nn.Dropout2d()
-        self.fc1 = nn.Linear(320, 50)
-        self.fc2 = nn.Linear(50, 10)
+        self.conv1 = nn.Conv2d(1, 32, 3, 1)
+        self.conv2 = nn.Conv2d(32, 64, 3, 1)
+        self.dropout1 = nn.Dropout(0.25)
+        self.dropout2 = nn.Dropout(0.5)
+        self.fc1 = nn.Linear(9216, 128)
+        self.fc2 = nn.Linear(128, 10)
 
     def forward(self, x):
-        x = F.relu(F.max_pool2d(self.conv1(x), 2))
-        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
-        x = x.view(-1, 320)
-        x = F.relu(self.fc1(x))
-        x = F.dropout(x, training=self.training)
+        x = self.conv1(x)
+        x = F.relu(x)
+        x = self.conv2(x)
+        x = F.relu(x)
+        x = F.max_pool2d(x, 2)
+        x = self.dropout1(x)
+        x = torch.flatten(x, 1)
+        x = self.fc1(x)
+        x = F.relu(x)
+        x = self.dropout2(x)
         x = self.fc2(x)
-        return F.log_softmax(x, dim=1)
+        output = F.log_softmax(x, dim=1)
+        return output
 
 # MNIST Test dataset and dataloader declaration
 test_loader = torch.utils.data.DataLoader(
     datasets.MNIST('../data', train=False, download=True, transform=transforms.Compose([
             transforms.ToTensor(),
-            ])),
+            transforms.Normalize((0.1307,), (0.3081,)),
+            ])),
         batch_size=1, shuffle=True)
 
 # Define what device we are using
@@ -184,7 +192,7 @@ def forward(self, x):
 model = Net().to(device)
 
 # Load the pretrained model
-model.load_state_dict(torch.load(pretrained_model, weights_only=True, map_location='cpu'))
+model.load_state_dict(torch.load(pretrained_model, map_location=device))
 
 # Set the model in evaluation mode. In this case this is for the Dropout layers
 model.eval()
@@ -219,6 +227,26 @@ def fgsm_attack(image, epsilon, data_grad):
     # Return the perturbed image
     return perturbed_image
 
+# restores the tensors to their original scale
+def denorm(batch, mean=[0.1307], std=[0.3081]):
+    """
+    Convert a batch of tensors to their original scale.
+
+    Args:
+        batch (torch.Tensor): Batch of normalized tensors.
+        mean (torch.Tensor or list): Mean used for normalization.
+        std (torch.Tensor or list): Standard deviation used for normalization.
+
+    Returns:
+        torch.Tensor: batch of tensors without normalization applied to them.
+    """
+    if isinstance(mean, list):
+        mean = torch.tensor(mean).to(device)
+    if isinstance(std, list):
+        std = torch.tensor(std).to(device)
+
+    return batch * std.view(1, -1, 1, 1) + mean.view(1, -1, 1, 1)
+
 
 ######################################################################
 # Testing Function
@@ -273,11 +301,17 @@ def test( model, device, test_loader, epsilon ):
         # Collect ``datagrad``
         data_grad = data.grad.data
 
+        # Restore the data to its original scale
+        data_denorm = denorm(data)
+
         # Call FGSM Attack
-        perturbed_data = fgsm_attack(data, epsilon, data_grad)
+        perturbed_data = fgsm_attack(data_denorm, epsilon, data_grad)
+
+        # Reapply normalization
+        perturbed_data_normalized = transforms.Normalize((0.1307,), (0.3081,))(perturbed_data)
 
         # Re-classify the perturbed image
-        output = model(perturbed_data)
+        output = model(perturbed_data_normalized)
 
         # Check for success
         final_pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
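
Why the new `denorm` step matters: `fgsm_attack` clamps its output to [0, 1], which is only meaningful in raw pixel space, so the gradient is taken on the normalized input, the perturbation is applied to the denormalized batch, and the result is re-normalized before re-classification. A small standalone check, illustrative only and not part of the commit, that the arithmetic in `denorm` inverts the MNIST `Normalize` transform:

import torch
from torchvision import transforms

normalize = transforms.Normalize((0.1307,), (0.3081,))

x = torch.rand(8, 1, 28, 28)                  # fake MNIST-sized batch in [0, 1]
x_norm = normalize(x)                         # what the DataLoader now yields
x_back = x_norm * 0.3081 + 0.1307             # single-channel version of denorm()

print(torch.allclose(x, x_back, atol=1e-6))   # True: denorm undoes Normalize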
