
Commit d541f74

[BE] Cleanup + set random seed
- Remove unnecessary brackets
- Use f-strings
- Set random seed for reproducibility
1 parent 1068abe commit d541f74
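
Taken together, the three cleanup patterns named in the commit message look roughly like the following standalone sketch (the epsilon and accuracy numbers here are hypothetical placeholders, not lines copied from the tutorial):

import torch

# Set random seed for reproducibility (the same value the commit adds to the tutorial)
torch.manual_seed(42)

# Drop unnecessary brackets around boolean conditions
use_cuda = True
device = torch.device("cuda" if use_cuda and torch.cuda.is_available() else "cpu")

# Use f-strings instead of str.format(); the numbers below are made-up placeholders
epsilon, correct, total = 0.05, 9810, 10000
final_acc = correct / float(total)
print(f"Epsilon: {epsilon}\tTest Accuracy = {correct} / {total} = {final_acc}")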

File tree

1 file changed: +9 -14 lines changed


beginner_source/fgsm_tutorial.py

Lines changed: 9 additions & 14 deletions
@@ -98,13 +98,6 @@
 import numpy as np
 import matplotlib.pyplot as plt
 
-# NOTE: This is a hack to get around "User-agent" limitations when downloading MNIST datasets
-#   see, https://github.com/pytorch/vision/issues/3497 for more information
-from six.moves import urllib
-opener = urllib.request.build_opener()
-opener.addheaders = [('User-agent', 'Mozilla/5.0')]
-urllib.request.install_opener(opener)
-
 
 ######################################################################
 # Implementation
@@ -140,6 +133,8 @@
 epsilons = [0, .05, .1, .15, .2, .25, .3]
 pretrained_model = "data/lenet_mnist_model.pth"
 use_cuda=True
+# Set random seed for reproducibility
+torch.manual_seed(42)
 
 
 ######################################################################
@@ -178,18 +173,18 @@ def forward(self, x):
 test_loader = torch.utils.data.DataLoader(
     datasets.MNIST('../data', train=False, download=True, transform=transforms.Compose([
             transforms.ToTensor(),
-            ])),
+        ])),
         batch_size=1, shuffle=True)
 
 # Define what device we are using
 print("CUDA Available: ",torch.cuda.is_available())
-device = torch.device("cuda" if (use_cuda and torch.cuda.is_available()) else "cpu")
+device = torch.device("cuda" if use_cuda and torch.cuda.is_available() else "cpu")
 
 # Initialize the network
 model = Net().to(device)
 
 # Load the pretrained model
-model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))
+model.load_state_dict(torch.load(pretrained_model, weights_only=True, map_location='cpu'))
 
 # Set the model in evaluation mode. In this case this is for the Dropout layers
 model.eval()
@@ -289,7 +284,7 @@ def test( model, device, test_loader, epsilon ):
         if final_pred.item() == target.item():
             correct += 1
             # Special case for saving 0 epsilon examples
-            if (epsilon == 0) and (len(adv_examples) < 5):
+            if epsilon == 0 and len(adv_examples) < 5:
                 adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                 adv_examples.append( (init_pred.item(), final_pred.item(), adv_ex) )
         else:
@@ -300,7 +295,7 @@ def test( model, device, test_loader, epsilon ):
 
     # Calculate final accuracy for this epsilon
     final_acc = correct/float(len(test_loader))
-    print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, len(test_loader), final_acc))
+    print(f"Epsilon: {epsilon}\tTest Accuracy = {correct} / {len(test_loader)} = {final_acc}")
 
     # Return the accuracy and an adversarial example
     return final_acc, adv_examples
@@ -386,9 +381,9 @@ def test( model, device, test_loader, epsilon ):
         plt.xticks([], [])
         plt.yticks([], [])
         if j == 0:
-            plt.ylabel("Eps: {}".format(epsilons[i]), fontsize=14)
+            plt.ylabel(f"Eps: {epsilons[i]}", fontsize=14)
         orig,adv,ex = examples[i][j]
-        plt.title("{} -> {}".format(orig, adv))
+        plt.title(f"{orig} -> {adv}")
         plt.imshow(ex, cmap="gray")
 plt.tight_layout()
 plt.show()
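
One change not called out in the commit title is the weights_only=True argument now passed to torch.load. It restricts deserialization to tensors and primitive containers instead of arbitrary pickled objects, which is the safer way to load a checkpoint you did not produce yourself. A minimal sketch of the same pattern, assuming the tutorial's checkpoint file has already been downloaded to the path shown:

import torch

# weights_only=True limits unpickling to tensor/primitive data rather than arbitrary objects
state_dict = torch.load("data/lenet_mnist_model.pth", map_location="cpu", weights_only=True)
print(sorted(state_dict.keys()))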
