diff --git a/beginner_source/blitz/neural_networks_tutorial.py b/beginner_source/blitz/neural_networks_tutorial.py
index 168572c4b14..a805cdeb3af 100644
--- a/beginner_source/blitz/neural_networks_tutorial.py
+++ b/beginner_source/blitz/neural_networks_tutorial.py
@@ -176,7 +176,7 @@ def num_flat_features(self, x):
 # -> loss
 #
 # So, when we call ``loss.backward()``, the whole graph is differentiated
-# w.r.t. the loss, and all Tensors in the graph that has ``requires_grad=True``
+# w.r.t. the loss, and all Tensors in the graph that have ``requires_grad=True``
 # will have their ``.grad`` Tensor accumulated with the gradient.
 #
 # For illustration, let us follow a few steps backward:
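
A minimal sketch (not part of the patch itself) illustrating the sentence the hunk corrects: after ``loss.backward()``, every tensor with ``requires_grad=True`` that took part in building the graph has the gradient accumulated into its ``.grad`` attribute. The tensor names below are invented for illustration only.

# Illustrative sketch: gradients are accumulated into .grad, not replaced.
import torch

x = torch.ones(3, requires_grad=True)
w = torch.tensor([2.0, 3.0, 4.0], requires_grad=True)

loss = (w * x).sum()
loss.backward()
print(x.grad)  # tensor([2., 3., 4.])  -- d(loss)/dx = w

# Running a fresh forward pass and calling backward again
# adds to the existing .grad rather than overwriting it.
loss2 = (w * x).sum()
loss2.backward()
print(x.grad)  # tensor([4., 6., 8.])  -- accumulated, which is why
               # training loops call optimizer.zero_grad() each step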