From 49b3aad7c85c26b053bc964c8babad079b95afd1 Mon Sep 17 00:00:00 2001 From: Nikita Shulga Date: Thu, 6 Oct 2022 23:09:42 -0700 Subject: [PATCH] Update `Tensor Gradients and Jacobian Products` example Shape of gradient argument of `x.backward` method should be that of `x`. Modify the example to make input tensor a 4x5 matrix and transpose it in the output Fixes https://github.com/pytorch/tutorials/issues/2065 --- beginner_source/basics/autogradqs_tutorial.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/beginner_source/basics/autogradqs_tutorial.py b/beginner_source/basics/autogradqs_tutorial.py index f1ac1aa54a1..ef05ad4aaa6 100644 --- a/beginner_source/basics/autogradqs_tutorial.py +++ b/beginner_source/basics/autogradqs_tutorial.py @@ -203,14 +203,14 @@ # compute the product: # -inp = torch.eye(5, requires_grad=True) -out = (inp+1).pow(2) -out.backward(torch.ones_like(inp), retain_graph=True) +inp = torch.eye(4, 5, requires_grad=True) +out = (inp+1).pow(2).t() +out.backward(torch.ones_like(out), retain_graph=True) print(f"First call\n{inp.grad}") -out.backward(torch.ones_like(inp), retain_graph=True) +out.backward(torch.ones_like(out), retain_graph=True) print(f"\nSecond call\n{inp.grad}") inp.grad.zero_() -out.backward(torch.ones_like(inp), retain_graph=True) +out.backward(torch.ones_like(out), retain_graph=True) print(f"\nCall after zeroing gradients\n{inp.grad}")