diff --git a/beginner_source/blitz/neural_networks_tutorial.py b/beginner_source/blitz/neural_networks_tutorial.py
index 4549089a5c4..68eab4ac88d 100644
--- a/beginner_source/blitz/neural_networks_tutorial.py
+++ b/beginner_source/blitz/neural_networks_tutorial.py
@@ -176,8 +176,9 @@ def num_flat_features(self, x):
 # -> loss
 #
 # So, when we call ``loss.backward()``, the whole graph is differentiated
-# w.r.t. the loss, and all Tensors in the graph that have ``requires_grad=True``
-# will have their ``.grad`` Tensor accumulated with the gradient.
+# w.r.t. the neural net parameters, and all Tensors in the graph that have
+# ``requires_grad=True`` will have their ``.grad`` Tensor accumulated with the
+# gradient.
 #
 # For illustration, let us follow a few steps backward:
 
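
Note (not part of the patch): the reworded comment is easy to sanity-check. The sketch below assumes a throwaway ``nn.Linear`` stand-in for the tutorial's ``Net()``; it shows that ``loss.backward()`` writes d(loss)/d(parameter) into each parameter's ``.grad`` and that a second backward call accumulates into ``.grad`` rather than overwriting it.

import torch
import torch.nn as nn

net = nn.Linear(4, 1)              # stand-in for the tutorial's Net(); shapes are arbitrary
criterion = nn.MSELoss()

inp = torch.randn(1, 4)
target = torch.randn(1, 1)

loss = criterion(net(inp), target)
loss.backward()                    # gradients of the loss w.r.t. the parameters land in .grad
first = net.weight.grad.clone()

loss = criterion(net(inp), target)
loss.backward()                    # second backward accumulates into .grad instead of replacing it
print(torch.allclose(net.weight.grad, 2 * first))   # True: the two gradients were summed

net.zero_grad()                    # hence the tutorial's reminder to zero gradients between updates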