diff --git a/beginner_source/nlp/pytorch_tutorial.py b/beginner_source/nlp/pytorch_tutorial.py
index 62249bf2dc8..6ec1b82709c 100644
--- a/beginner_source/nlp/pytorch_tutorial.py
+++ b/beginner_source/nlp/pytorch_tutorial.py
@@ -9,7 +9,7 @@
 All of deep learning is computations on tensors, which are
 generalizations of a matrix that can be indexed in more than 2
 dimensions. We will see exactly what this means in-depth later. First,
-lets look what we can do with tensors.
+let's look what we can do with tensors.
 """

 # Author: Robert Guthrie
@@ -162,7 +162,7 @@
 # other operation, etc.)
 #
 # If ``requires_grad=True``, the Tensor object keeps track of how it was
-# created. Lets see it in action.
+# created. Let's see it in action.
 #

 # Tensor factory methods have a ``requires_grad`` flag
@@ -187,7 +187,7 @@
 # But how does that help us compute a gradient?
 #

-# Lets sum up all the entries in z
+# Let's sum up all the entries in z
 s = z.sum()
 print(s)
 print(s.grad_fn)
@@ -222,7 +222,7 @@


 ######################################################################
-# Lets have Pytorch compute the gradient, and see that we were right:
+# Let's have Pytorch compute the gradient, and see that we were right:
 # (note if you run this block multiple times, the gradient will increment.
 # That is because Pytorch *accumulates* the gradient into the .grad
 # property, since for many models this is very convenient.)
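
Note on the last hunk: the accumulation behavior that the tutorial comment describes is easy to check. Below is a minimal sketch (illustrative only, not part of the patched file) showing that repeated backward() calls add into .grad rather than overwriting it.

import torch

# Illustrative sketch: gradients accumulate into .grad across backward() calls.
x = torch.ones(3, requires_grad=True)

(x * 2).sum().backward()
print(x.grad)      # tensor([2., 2., 2.])

(x * 2).sum().backward()
print(x.grad)      # tensor([4., 4., 4.]) -- accumulated, not replaced

x.grad.zero_()     # typical reset before the next backward pass
print(x.grad)      # tensor([0., 0., 0.])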