From c799cb575f993a8f8ec7117756697358575e0195 Mon Sep 17 00:00:00 2001
From: Kiersten Stokes
Date: Fri, 27 Jan 2023 12:29:37 -0600
Subject: [PATCH] Move code to non-executable block

Signed-off-by: Kiersten Stokes
---
 .../blitz/neural_networks_tutorial.py | 31 ++++++++++++++++++-------------
 1 file changed, 18 insertions(+), 13 deletions(-)

diff --git a/beginner_source/blitz/neural_networks_tutorial.py b/beginner_source/blitz/neural_networks_tutorial.py
index ba1b1754692..3b3c95fd229 100644
--- a/beginner_source/blitz/neural_networks_tutorial.py
+++ b/beginner_source/blitz/neural_networks_tutorial.py
@@ -219,7 +219,9 @@ def forward(self, x):
 # The simplest update rule used in practice is the Stochastic Gradient
 # Descent (SGD):
 #
-# ``weight = weight - learning_rate * gradient``
+# .. code:: python
+#
+#    weight = weight - learning_rate * gradient
 #
 # We can implement this using simple Python code:
 #
@@ -233,18 +235,21 @@ def forward(self, x):
 # update rules such as SGD, Nesterov-SGD, Adam, RMSProp, etc.
 # To enable this, we built a small package: ``torch.optim`` that
 # implements all these methods. Using it is very simple:
-
-import torch.optim as optim
-
-# create your optimizer
-optimizer = optim.SGD(net.parameters(), lr=0.01)
-
-# in your training loop:
-optimizer.zero_grad()  # zero the gradient buffers
-output = net(input)
-loss = criterion(output, target)
-loss.backward()
-optimizer.step()  # Does the update
+#
+# .. code:: python
+#
+#    import torch.optim as optim
+#
+#    # create your optimizer
+#    optimizer = optim.SGD(net.parameters(), lr=0.01)
+#
+#    # in your training loop:
+#    optimizer.zero_grad()  # zero the gradient buffers
+#    output = net(input)
+#    loss = criterion(output, target)
+#    loss.backward()
+#    optimizer.step()  # Does the update
+#
 
 ###############################################################
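
Note: the first hunk's context ends right after the tutorial sentence "We can
implement this using simple Python code:", so the manual update loop itself is
not visible in this patch. As a rough illustration only, not the tutorial's
exact code, a minimal self-contained sketch of that manual SGD step could look
like the following; the ``nn.Linear`` stand-in model and the random
``input``/``target`` tensors are placeholders assumed here, while the tutorial
uses its own ``net``, ``criterion``, ``input``, and ``target``:

.. code:: python

   import torch
   import torch.nn as nn

   # placeholder model and data standing in for the tutorial's net/input/target
   net = nn.Linear(10, 1)
   input = torch.randn(1, 10)
   target = torch.randn(1, 1)
   criterion = nn.MSELoss()

   # forward + backward pass to populate .grad on each parameter
   loss = criterion(net(input), target)
   net.zero_grad()
   loss.backward()

   # manual SGD step: weight = weight - learning_rate * gradient
   learning_rate = 0.01
   with torch.no_grad():
       for p in net.parameters():
           p -= learning_rate * p.grad

The ``torch.optim`` version shown in the second hunk performs the same update
via ``optimizer.step()`` after ``loss.backward()``.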