Skip to content

Commit 0331944

Browse files
authored
Merge branch 'main' into patch-1
2 parents c60a6fc + 49c1494 commit 0331944

File tree

5 files changed

+54
-47
lines changed

5 files changed

+54
-47
lines changed

.jenkins/build.sh

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -24,10 +24,10 @@ pip install --progress-bar off -r $DIR/../requirements.txt
2424

2525
#Install PyTorch Nightly for test.
2626
# Nightly - pip install --pre torch torchvision torchaudio -f https://download.pytorch.org/whl/nightly/cu102/torch_nightly.html
27-
# Install 2.2 for testing
28-
pip uninstall -y torch torchvision torchaudio torchtext torchdata
29-
pip3 install torch==2.2.0 torchvision torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu121
30-
pip3 install torchdata torchtext --index-url https://download.pytorch.org/whl/test/cpu
27+
# Install 2.2 for testing - uncomment to install nightly binaries (update the version as needed).
28+
# pip uninstall -y torch torchvision torchaudio torchtext torchdata
29+
# pip3 install torch==2.2.0 torchvision torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu121
30+
# pip3 install torchdata torchtext --index-url https://download.pytorch.org/whl/test/cpu
3131

3232
# Install two language tokenizers for Translation with TorchText tutorial
3333
python -m spacy download en_core_web_sm

advanced_source/coding_ddpg.py

Lines changed: 15 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -63,16 +63,25 @@
6363
# %%bash
6464
# pip3 install torchrl mujoco glfw
6565

66-
import torchrl
67-
import torch
68-
import tqdm
69-
from typing import Tuple
70-
7166
# sphinx_gallery_start_ignore
7267
import warnings
7368
warnings.filterwarnings("ignore")
69+
import multiprocessing
70+
# TorchRL prefers spawn method, that restricts creation of ``~torchrl.envs.ParallelEnv`` inside
71+
# `__main__` method call, but for ease of reading the code switches to fork
72+
# which is also the default start method in Google's Colaboratory
73+
try:
74+
multiprocessing.set_start_method("fork")
75+
except RuntimeError:
76+
assert multiprocessing.get_start_method() == "fork"
7477
# sphinx_gallery_end_ignore
7578

79+
80+
import torchrl
81+
import torch
82+
import tqdm
83+
from typing import Tuple
84+
7685
###############################################################################
7786
# We will execute the policy on CUDA if available
7887
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@@ -1219,6 +1228,6 @@ def ceil_div(x, y):
12191228
#
12201229
# To iterate further on this loss module we might consider:
12211230
#
1222-
# - Using `@dispatch` (see `[Feature] Distpatch IQL loss module <https://github.com/pytorch/rl/pull/1230>`_.
1231+
# - Using `@dispatch` (see `[Feature] Distpatch IQL loss module <https://github.com/pytorch/rl/pull/1230>`_.)
12231232
# - Allowing flexible TensorDict keys.
12241233
#

index.rst

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -3,15 +3,11 @@ Welcome to PyTorch Tutorials
33

44
What's new in PyTorch tutorials?
55

6-
* `Getting Started with Distributed Checkpoint (DCP) <https://pytorch.org/tutorials/recipes/distributed_checkpoint_recipe.html>`__
7-
* `torch.export Tutorial <https://pytorch.org/tutorials/intermediate/torch_export_tutorial.html>`__
8-
* `Facilitating New Backend Integration by PrivateUse1 <https://pytorch.org/tutorials/advanced/privateuseone.html>`__
9-
* `(prototype) Accelerating BERT with semi-structured (2:4) sparsity <https://pytorch.org/tutorials/prototype/semi_structured_sparse.html>`__
10-
* `(prototype) PyTorch 2 Export Quantization-Aware Training (QAT) <https://pytorch.org/tutorials/prototype/pt2e_quant_qat.html>`__
11-
* `(prototype) PyTorch 2 Export Post Training Quantization with X86 Backend through Inductor <https://pytorch.org/tutorials/prototype/pt2e_quant_ptq_x86_inductor.html>`__
12-
* `(prototype) Inductor C++ Wrapper Tutorial <https://pytorch.org/tutorials/prototype/inductor_cpp_wrapper_tutorial.html>`__
13-
* `How to save memory by fusing the optimizer step into the backward pass <https://pytorch.org/tutorials/intermediate/optimizer_step_in_backward_tutorial.html>`__
14-
* `Tips for Loading an nn.Module from a Checkpoint <https://pytorch.org/tutorials/recipes/recipes/module_load_state_dict_tips.html>`__
6+
* `PyTorch Inference Performance Tuning on AWS Graviton Processors <https://pytorch.org/tutorials/recipes/inference_tuning_on_aws_graviton.html>`__
7+
* `Using TORCH_LOGS python API with torch.compile <https://pytorch.org/tutorials/recipes/torch_logs.html>`__
8+
* `PyTorch 2 Export Quantization with X86 Backend through Inductor <https://pytorch.org/tutorials/prototype/pt2e_quant_x86_inductor.html>`__
9+
* `Getting Started with DeviceMesh <https://pytorch.org/tutorials/recipes/distributed_device_mesh.html>`__
10+
* `Compiling the optimizer with torch.compile <https://pytorch.org/tutorials/recipes/compiling_optimizer.html>`__
1511

1612

1713
.. raw:: html

recipes_source/compiling_optimizer.rst

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -86,3 +86,9 @@ Sample Results:
8686

8787
* Eager runtime: 747.2437149845064us
8888
* Compiled runtime: 392.07384741178us
89+
90+
See Also
91+
~~~~~~~~~
92+
93+
* For an in-depth technical overview, see
94+
`Compiling the optimizer with PT2 <https://dev-discuss.pytorch.org/t/compiling-the-optimizer-with-pt2/1669>`__

recipes_source/torch_logs.py

Lines changed: 24 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88

99
######################################################################
1010
#
11-
# This tutorial introduces the ``TORCH_LOGS`` environment variable, as well ass the Python API, and
11+
# This tutorial introduces the ``TORCH_LOGS`` environment variable, as well as the Python API, and
1212
# demonstrates how to apply it to observe the phases of ``torch.compile``.
1313
#
1414
# .. note::
@@ -34,53 +34,49 @@
3434

3535
# exit cleanly if we are on a device that doesn't support torch.compile
3636
if torch.cuda.get_device_capability() < (7, 0):
37-
print("Exiting because torch.compile is not supported on this device.")
38-
import sys
37+
print("Skipping because torch.compile is not supported on this device.")
38+
else:
39+
@torch.compile()
40+
def fn(x, y):
41+
z = x + y
42+
return z + 2
3943

40-
sys.exit(0)
4144

42-
43-
@torch.compile()
44-
def fn(x, y):
45-
z = x + y
46-
return z + 2
47-
48-
49-
inputs = (torch.ones(2, 2, device="cuda"), torch.zeros(2, 2, device="cuda"))
45+
inputs = (torch.ones(2, 2, device="cuda"), torch.zeros(2, 2, device="cuda"))
5046

5147

5248
# print separator and reset dynamo
5349
# between each example
54-
def separator(name):
55-
print(f"==================={name}=========================")
56-
torch._dynamo.reset()
50+
def separator(name):
51+
print(f"==================={name}=========================")
52+
torch._dynamo.reset()
5753

5854

59-
separator("Dynamo Tracing")
55+
separator("Dynamo Tracing")
6056
# View dynamo tracing
6157
# TORCH_LOGS="+dynamo"
62-
torch._logging.set_logs(dynamo=logging.DEBUG)
63-
fn(*inputs)
58+
torch._logging.set_logs(dynamo=logging.DEBUG)
59+
fn(*inputs)
6460

65-
separator("Traced Graph")
61+
separator("Traced Graph")
6662
# View traced graph
6763
# TORCH_LOGS="graph"
68-
torch._logging.set_logs(graph=True)
69-
fn(*inputs)
64+
torch._logging.set_logs(graph=True)
65+
fn(*inputs)
7066

71-
separator("Fusion Decisions")
67+
separator("Fusion Decisions")
7268
# View fusion decisions
7369
# TORCH_LOGS="fusion"
74-
torch._logging.set_logs(fusion=True)
75-
fn(*inputs)
70+
torch._logging.set_logs(fusion=True)
71+
fn(*inputs)
7672

77-
separator("Output Code")
73+
separator("Output Code")
7874
# View output code generated by inductor
7975
# TORCH_LOGS="output_code"
80-
torch._logging.set_logs(output_code=True)
81-
fn(*inputs)
76+
torch._logging.set_logs(output_code=True)
77+
fn(*inputs)
8278

83-
separator("")
79+
separator("")
8480

8581
######################################################################
8682
# Conclusion

0 commit comments

Comments
 (0)