
Commit 0a93a8b

0ssamaak0 and svekars authored
change torch.cuda.amp.GradScaler to torch.GradScaler("cuda") (#3257)
* change torch.cuda.amp.GradScaler to torch.GradScaler("cuda")
* changing torch.GradScaler to torch.amp.GradScaler

Co-authored-by: Svetlana Karslioglu <svekars@meta.com>
1 parent 90a7da8 commit 0a93a8b

File tree

1 file changed: +2 −2 lines changed


recipes_source/recipes/amp_recipe.py

Lines changed: 2 additions & 2 deletions
@@ -150,7 +150,7 @@ def make_model(in_size, out_size, num_layers):
 # The same ``GradScaler`` instance should be used for the entire convergence run.
 # If you perform multiple convergence runs in the same script, each run should use
 # a dedicated fresh ``GradScaler`` instance. ``GradScaler`` instances are lightweight.
-scaler = torch.cuda.amp.GradScaler()
+scaler = torch.amp.GradScaler("cuda")
 
 for epoch in range(0): # 0 epochs, this section is for illustration only
     for input, target in zip(data, targets):
@@ -182,7 +182,7 @@ def make_model(in_size, out_size, num_layers):
 
 net = make_model(in_size, out_size, num_layers)
 opt = torch.optim.SGD(net.parameters(), lr=0.001)
-scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
+scaler = torch.amp.GradScaler("cuda", enabled=use_amp)
 
 start_timer()
 for epoch in range(epochs):
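
For context, here is a minimal, self-contained sketch of how the updated constructor fits into a mixed-precision training loop. It follows the pattern of amp_recipe.py, but the model, optimizer, loss, and data below are stand-ins assumed for illustration; they are not part of this commit:

import torch

device = "cuda"  # assumed: a CUDA-capable GPU is available
use_amp = True

# Stand-in model, optimizer, loss, and data (hypothetical; the recipe
# builds these with make_model and its own data pipeline).
net = torch.nn.Linear(512, 512).to(device)
opt = torch.optim.SGD(net.parameters(), lr=0.001)
loss_fn = torch.nn.MSELoss()
input = torch.randn(64, 512, device=device)
target = torch.randn(64, 512, device=device)

# New device-generic constructor: torch.amp.GradScaler("cuda", ...)
# replaces the deprecated torch.cuda.amp.GradScaler(...).
scaler = torch.amp.GradScaler("cuda", enabled=use_amp)

for epoch in range(1):
    opt.zero_grad(set_to_none=True)
    # Run the forward pass under autocast so eligible ops use float16.
    with torch.autocast(device_type=device, dtype=torch.float16, enabled=use_amp):
        output = net(input)
        loss = loss_fn(output, target)
    # Scale the loss before backward, then step and update through the scaler.
    scaler.scale(loss).backward()
    scaler.step(opt)
    scaler.update()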
