From 15882369bf8a1f8294be50798044e4d93c42df9e Mon Sep 17 00:00:00 2001
From: Henry Chen
Date: Sat, 6 May 2017 22:46:30 -0500
Subject: [PATCH 1/2] Use torch.arange instead of torch.range (to be deprecated)

---
 beginner_source/blitz/neural_networks_tutorial.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/beginner_source/blitz/neural_networks_tutorial.py b/beginner_source/blitz/neural_networks_tutorial.py
index 39d37cbc2d0..f4952590380 100644
--- a/beginner_source/blitz/neural_networks_tutorial.py
+++ b/beginner_source/blitz/neural_networks_tutorial.py
@@ -157,7 +157,7 @@ def num_flat_features(self, x):
 # For example:
 
 output = net(input)
-target = Variable(torch.range(1, 10))  # a dummy target, for example
+target = Variable(torch.arange(1, 11))  # a dummy target, for example
 criterion = nn.MSELoss()
 
 loss = criterion(output, target)

From 333cab984c31523a92ef88b69d9a5d43eca7e582 Mon Sep 17 00:00:00 2001
From: Henry Chen
Date: Sat, 6 May 2017 23:31:21 -0500
Subject: [PATCH 2/2] Fix attribute names after refactoring

creator -> grad_fn
previous_functions -> next_functions

pytorch commit: https://github.com/pytorch/pytorch/commit/2ca787fcf40d65065a2cf036d238f748ec18d227
---
 beginner_source/blitz/autograd_tutorial.py        | 8 ++++----
 beginner_source/blitz/neural_networks_tutorial.py | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/beginner_source/blitz/autograd_tutorial.py b/beginner_source/blitz/autograd_tutorial.py
index 4194d14439b..692ee6f79fc 100644
--- a/beginner_source/blitz/autograd_tutorial.py
+++ b/beginner_source/blitz/autograd_tutorial.py
@@ -36,9 +36,9 @@
 ``Variable`` and ``Function`` are interconnected and build up an acyclic
 graph, that encodes a complete history of computation. Each variable has
-a ``.creator`` attribute that references a ``Function`` that has created
+a ``.grad_fn`` attribute that references a ``Function`` that has created
 the ``Variable`` (except for Variables created by the user - their
-``creator is None``).
+``grad_fn is None``).
 
 If you want to compute the derivatives, you can call ``.backward()`` on
 a ``Variable``. If ``Variable`` is a scalar (i.e. it holds a one element
 data), you don't need to specify any arguments to ``backward()``,
@@ -61,8 +61,8 @@
 print(y)
 
 ###############################################################
-# ``y`` was created as a result of an operation, so it has a creator.
-print(y.creator)
+# ``y`` was created as a result of an operation, so it has a ``grad_fn``.
+print(y.grad_fn)
 
 ###############################################################
 # Do more operations on y
diff --git a/beginner_source/blitz/neural_networks_tutorial.py b/beginner_source/blitz/neural_networks_tutorial.py
index f4952590380..753447446e3 100644
--- a/beginner_source/blitz/neural_networks_tutorial.py
+++ b/beginner_source/blitz/neural_networks_tutorial.py
@@ -165,7 +165,7 @@ def num_flat_features(self, x):
 
 ########################################################################
 # Now, if you follow ``loss`` in the backward direction, using it’s
-# ``.creator`` attribute, you will see a graph of computations that looks
+# ``.grad_fn`` attribute, you will see a graph of computations that looks
 # like this:
 #
 # ::
@@ -181,9 +181,9 @@ def num_flat_features(self, x):
 #
 # For illustration, let us follow a few steps backward:
 
-print(loss.creator)  # MSELoss
-print(loss.creator.previous_functions[0][0])  # Linear
-print(loss.creator.previous_functions[0][0].previous_functions[0][0])  # ReLU
+print(loss.grad_fn)  # MSELoss
+print(loss.grad_fn.next_functions[0][0])  # Linear
+print(loss.grad_fn.next_functions[0][0].next_functions[0][0])  # ReLU
 
 ########################################################################
 # Backprop
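
As a quick sanity check of both changes, here is a minimal sketch, assuming a
PyTorch build that already has the grad_fn/next_functions refactor (the
pytorch commit referenced above). Note that torch.range(1, 10) included its
endpoint while torch.arange excludes it, so the replacement shifts the upper
bound to 11; the float endpoints below are a hedge, since on newer builds
integer endpoints yield an integer tensor, which cannot require gradients.
Exact printed class names vary by version.

    import torch
    from torch.autograd import Variable

    # arange(1., 11.) reproduces the old range(1, 10): ten values, 1.0-10.0.
    x = Variable(torch.arange(1., 11.), requires_grad=True)
    y = (x * x).sum()

    print(x.grad_fn)  # None: x was created by the user
    print(y.grad_fn)  # e.g. <SumBackward0 ...>: y came from an operation

    # next_functions (formerly previous_functions) walks one step
    # backward through the graph.
    print(y.grad_fn.next_functions[0][0])  # e.g. <MulBackward0 ...>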