|
| 1 | +""" |
| 2 | +Changing default device |
| 3 | +======================= |
| 4 | +
|
| 5 | +It is common practice to write PyTorch code in a device-agnostic way, |
| 6 | +and then switch between CPU and CUDA depending on what hardware is available. |
| 7 | +Typically, to do this you might have used if-statements and ``cuda()`` calls |
| 8 | +to do this: |
| 9 | +
|
| 10 | +""" |
import torch

# Stand-in for a real hardware check such as ``torch.cuda.is_available()``.
USE_CUDA = False

# Decide on the target device string up front.
device = 'cpu'
if USE_CUDA:
    device = 'cuda'

# Modules are moved with an explicit ``cuda()`` call...
mod = torch.nn.Linear(20, 30)
if USE_CUDA:
    mod.cuda()

# ...while tensors take a ``device=`` argument at construction time.
inp = torch.randn(128, 20, device=device)
print(mod(inp).device)
| 24 | + |
| 25 | +################################################################### |
| 26 | +# PyTorch now also has a context manager which can take care of the |
| 27 | +# device transfer automatically. Here is an example: |
| 28 | + |
# Inside the ``torch.device`` context manager, both the module's parameters
# and the freshly-created input tensor land on CUDA automatically.
with torch.device('cuda'):
    mod = torch.nn.Linear(20, 30)
    print(mod.weight.device)
    out = mod(torch.randn(128, 20))
    print(out.device)
| 33 | + |
| 34 | +######################################### |
| 35 | +# You can also set it globally like this: |
| 36 | + |
# After this call every factory function (and module construction)
# defaults to CUDA without any context manager in scope.
torch.set_default_device('cuda')

mod = torch.nn.Linear(20, 30)
print(mod.weight.device)
result = mod(torch.randn(128, 20))
print(result.device)
| 42 | + |
| 43 | +################################################################ |
| 44 | +# This function imposes a slight performance cost on every Python |
| 45 | +# call to the torch API (not just factory functions). If this |
| 46 | +# is causing problems for you, please comment on |
| 47 | +# `this issue <https://github.com/pytorch/pytorch/issues/92701>`__ |