
Commit 33e5237

Merge branch 'main' into malfet/add-pyspelling
2 parents 784933f + b2fba80 commit 33e5237

66 files changed: 3,084 additions and 4,347 deletions


.circleci/README.md

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+ Do not edit `config.yml` directly, make all the changes to `config.yml.in` and then run `regenerate.py`

.circleci/config.yml

Lines changed: 2 additions & 0 deletions
@@ -190,6 +190,7 @@ jobs:
    resource_class: gpu.nvidia.small.multi
  pytorch_tutorial_pr_build_worker_1:
    <<: *pytorch_tutorial_build_worker_defaults
+   resource_class: gpu.nvidia.medium
  pytorch_tutorial_pr_build_worker_10:
    <<: *pytorch_tutorial_build_worker_defaults
  pytorch_tutorial_pr_build_worker_11:
@@ -234,6 +235,7 @@ jobs:
    resource_class: gpu.nvidia.small.multi
  pytorch_tutorial_trunk_build_worker_1:
    <<: *pytorch_tutorial_build_worker_defaults
+   resource_class: gpu.nvidia.medium
  pytorch_tutorial_trunk_build_worker_10:
    <<: *pytorch_tutorial_build_worker_defaults
  pytorch_tutorial_trunk_build_worker_11:

.circleci/regenerate.py

Lines changed: 6 additions & 2 deletions
@@ -26,16 +26,20 @@ def indent(indentation, data_list):
def jobs(pr_or_trunk, num_workers=20, indentation=2):
    jobs = {}

-   # all tutorials that need gpu.nvidia.small.multi machines will be routed
-   # by get_files_to_run.py to 0th worker
+   # all tutorials that need gpu.nvidia.small.multi machines will be routed by
+   # get_files_to_run.py to 0th worker, similarly for gpu.nvidia.medium and the
+   # 1st worker
    needs_gpu_nvidia_small_multi = [0]
+   needs_gpu_nvidia_medium = [1]
    jobs[f"pytorch_tutorial_{pr_or_trunk}_build_manager"] = {
        "<<": "*pytorch_tutorial_build_manager_defaults"
    }
    for i in range(num_workers):
        job_info = {"<<": "*pytorch_tutorial_build_worker_defaults"}
        if i in needs_gpu_nvidia_small_multi:
            job_info["resource_class"] = "gpu.nvidia.small.multi"
+       if i in needs_gpu_nvidia_medium:
+           job_info["resource_class"] = "gpu.nvidia.medium"
        jobs[f"pytorch_tutorial_{pr_or_trunk}_build_worker_{i}"] = job_info

    return indent(indentation, jobs).replace("'", "")
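In effect, the regenerated `config.yml` now pins two special workers: worker 0 keeps `gpu.nvidia.small.multi` and worker 1 gets `gpu.nvidia.medium`. A standalone sketch of the mapping `jobs()` builds before it is rendered to YAML (illustration only, not part of the commit):

```python
# Sketch of the worker -> resource_class mapping produced by the updated jobs().
# Worker indices and resource classes are taken from the diff above.
def sketch_worker_jobs(pr_or_trunk="pr", num_workers=20):
    overrides = {0: "gpu.nvidia.small.multi", 1: "gpu.nvidia.medium"}
    jobs = {}
    for i in range(num_workers):
        job_info = {"<<": "*pytorch_tutorial_build_worker_defaults"}
        if i in overrides:
            job_info["resource_class"] = overrides[i]
        jobs[f"pytorch_tutorial_{pr_or_trunk}_build_worker_{i}"] = job_info
    return jobs

print(sketch_worker_jobs()["pytorch_tutorial_pr_build_worker_1"])
# {'<<': '*pytorch_tutorial_build_worker_defaults', 'resource_class': 'gpu.nvidia.medium'}
```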

.github/ISSUE_TEMPLATE/bug-report.yml

Lines changed: 60 additions & 0 deletions
@@ -0,0 +1,60 @@
+ name: 🐛 Bug Report
+ description: Create a tutorial bug report
+ title: "[BUG] - <title>"
+ labels: [
+   "bug"
+ ]
+
+ body:
+   - type: markdown
+     attributes:
+       value: >
+         #### Before submitting a bug, please make sure the issue hasn't already been addressed by searching through [the existing and past issues](https://github.com/pytorch/tutorials/issues?q=is%3Aissue+sort%3Acreated-desc+).
+   - type: textarea
+     attributes:
+       label: Add Link
+       description: |
+         **Add the link to the tutorial**
+       placeholder: |
+         Link to the tutorial on the website:
+     validations:
+       required: true
+   - type: textarea
+     attributes:
+       label: Describe the bug
+       description: |
+         **Add the bug description**
+       placeholder: |
+         Provide a detailed description of the issue with code samples if relevant
+         ```python
+
+         # Sample code to reproduce the problem if relevant
+         ```
+
+         **Expected Result:** (Describe what you were expecting to see)
+
+
+         **Actual Result:** (Describe the result)
+
+         ```
+         The error message you got, with the full traceback.
+         ```
+
+     validations:
+       required: true
+   - type: textarea
+     attributes:
+       label: Describe your environment
+       description: |
+         **Describe the environment you encountered the bug in:**
+       placeholder: |
+         * Platform (i.e macOS, Linux, Google Colab):
+         * CUDA (yes/no, version?):
+         * PyTorch version (run `python -c "import torch; print(torch.__version__)"`):
+
+     validations:
+       required: true
+   - type: markdown
+     attributes:
+       value: >
+         Thanks for contributing 🎉!

Lines changed: 37 additions & 0 deletions
@@ -0,0 +1,37 @@
+ name: 🚀 Feature request
+ description: Submit a proposal for a new PyTorch tutorial or improvement of an existing tutorial
+ title: "💡 [REQUEST] - <title>"
+ labels: [
+   "feature"
+ ]
+
+ body:
+   - type: textarea
+     attributes:
+       label: 🚀 Describe the improvement or the new tutorial
+       description: |
+         **Describe the improvement**
+       placeholder: |
+         Explain why this improvement or new tutorial is important. For example, *"This tutorial will help users to better understand feature X of PyTorch."* If there is a tutorial that you propose to replace, add it here. If this is related to another GitHub issue, add a link here.
+     validations:
+       required: true
+   - type: textarea
+     attributes:
+       label: Existing tutorials on this topic
+       description: |
+         **Add a list of existing tutorials on the same topic.**
+       placeholder: |
+         List tutorials that already explain this functionality, if any exist, on pytorch.org or elsewhere.
+         * Link
+         * Link
+   - type: textarea
+     attributes:
+       label: Additional context
+       description: |
+         **Add additional context**
+       placeholder: |
+         Add any other context or screenshots about the feature request.
+   - type: markdown
+     attributes:
+       value: >
+         Thanks for contributing 🎉!

.jenkins/get_files_to_run.py

Lines changed: 10 additions & 2 deletions
@@ -39,15 +39,23 @@ def add_to_shard(i, filename):
        shard_jobs,
    )

+   all_other_files = all_files.copy()
    needs_gpu_nvidia_small_multi = list(
        filter(lambda x: get_needs_machine(x) == "gpu.nvidia.small.multi", all_files,)
    )
+   needs_gpu_nvidia_medium = list(
+       filter(lambda x: get_needs_machine(x) == "gpu.nvidia.medium", all_files,)
+   )
    for filename in needs_gpu_nvidia_small_multi:
        # currently, the only job that uses gpu.nvidia.small.multi is the 0th worker,
        # so we'll add all the jobs that need this machine to the 0th worker
        add_to_shard(0, filename)
-
-   all_other_files = [x for x in all_files if x not in needs_gpu_nvidia_small_multi]
+       all_other_files.remove(filename)
+   for filename in needs_gpu_nvidia_medium:
+       # currently, the only job that uses gpu.nvidia.medium is the 1st worker,
+       # so we'll add all the jobs that need this machine to the 1st worker
+       add_to_shard(1, filename)
+       all_other_files.remove(filename)

    sorted_files = sorted(all_other_files, key=get_duration, reverse=True,)
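The net effect: tutorials whose metadata says `gpu.nvidia.small.multi` are pinned to shard 0, those that say `gpu.nvidia.medium` are pinned to shard 1, and everything else is distributed by measured duration. A toy illustration (the third file name is a made-up placeholder; the real script shards the remaining files by runtime):

```python
# Toy routing example mirroring the logic added above. Real inputs come from
# .jenkins/metadata.json; "some_other_tutorial.py" is hypothetical.
needs = {
    "intermediate_source/model_parallel_tutorial.py": "gpu.nvidia.small.multi",
    "intermediate_source/torch_compile_tutorial.py": "gpu.nvidia.medium",
    "beginner_source/some_other_tutorial.py": None,
}
pinned_worker = {"gpu.nvidia.small.multi": 0, "gpu.nvidia.medium": 1}

shards = {i: [] for i in range(3)}  # pretend there are only 3 workers
leftover = []
for filename, machine in needs.items():
    if machine in pinned_worker:
        shards[pinned_worker[machine]].append(filename)
    else:
        leftover.append(filename)  # would be balanced by duration in the real script
print(shards, leftover)
```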

.jenkins/get_sphinx_filenames.py

Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
+ from pathlib import Path
+ from typing import List
+
+ from get_files_to_run import get_all_files
+ from validate_tutorials_built import NOT_RUN
+
+
+ def get_files_for_sphinx() -> List[str]:
+     all_py_files = get_all_files()
+     return [x for x in all_py_files if all(y not in x for y in NOT_RUN)]
+
+
+ SPHINX_SHOULD_RUN = "|".join(get_files_for_sphinx())
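Joining the runnable tutorial paths with `|` produces an alternation pattern, which suggests `SPHINX_SHOULD_RUN` is meant to be consumed as a regular expression by the docs build; how it is wired in is not shown in this commit. A hedged, self-contained sketch of that idea:

```python
# Assumption: SPHINX_SHOULD_RUN is used as a regex whose alternatives are the
# tutorials that should actually be executed. "pattern" below is a stand-in
# built from two paths that appear elsewhere in this commit.
import re

pattern = "|".join([
    "intermediate_source/torch_compile_tutorial.py",
    "intermediate_source/model_parallel_tutorial.py",
])
should_run = re.compile(pattern)

print(bool(should_run.search("intermediate_source/torch_compile_tutorial.py")))  # True
print(bool(should_run.search("beginner_source/basics/intro.py")))                # False
```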

.jenkins/metadata.json

Lines changed: 3 additions & 0 deletions
@@ -24,5 +24,8 @@
    },
    "intermediate_source/model_parallel_tutorial.py": {
        "needs": "gpu.nvidia.small.multi"
+   },
+   "intermediate_source/torch_compile_tutorial.py": {
+       "needs": "gpu.nvidia.medium"
    }
}
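The `needs` field is what `get_files_to_run.py` queries via `get_needs_machine()` to decide the worker pinning above. A hypothetical sketch of that lookup (the real helper lives in `.jenkins/get_files_to_run.py` and may be implemented differently):

```python
# Hypothetical lookup sketch, assuming it is run from the repository root.
import json
from typing import Optional


def needs_machine(filename: str, metadata_path: str = ".jenkins/metadata.json") -> Optional[str]:
    with open(metadata_path) as f:
        metadata = json.load(f)
    return metadata.get(filename, {}).get("needs")


print(needs_machine("intermediate_source/torch_compile_tutorial.py"))
# expected: gpu.nvidia.medium (per the entry added above)
```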

.jenkins/validate_tutorials_built.py

Lines changed: 43 additions & 46 deletions
@@ -9,51 +9,49 @@
# the file name to explain why, like intro.html), or fix the tutorial and remove it from this list).

NOT_RUN = [
-   "basics/intro", # no code
-   "translation_transformer",
-   "profiler",
-   "saving_loading_models",
-   "introyt/captumyt",
-   "introyt/trainingyt",
-   "examples_nn/polynomial_module",
-   "examples_nn/dynamic_net",
-   "examples_nn/polynomial_optim",
-   "former_torchies/autograd_tutorial_old",
-   "former_torchies/tensor_tutorial_old",
-   "examples_autograd/polynomial_autograd",
-   "examples_autograd/polynomial_custom_function",
-   "parametrizations",
-   "mnist_train_nas", # used by ax_multiobjective_nas_tutorial.py
-   "fx_conv_bn_fuser",
-   "super_resolution_with_onnxruntime",
-   "ddp_pipeline", # requires 4 gpus
-   "fx_graph_mode_ptq_dynamic",
-   "vmap_recipe",
-   "torchscript_freezing",
-   "nestedtensor",
-   "recipes/saving_and_loading_models_for_inference",
-   "recipes/saving_multiple_models_in_one_file",
-   "recipes/loading_data_recipe",
-   "recipes/tensorboard_with_pytorch",
-   "recipes/what_is_state_dict",
-   "recipes/profiler_recipe",
-   "recipes/save_load_across_devices",
-   "recipes/warmstarting_model_using_parameters_from_a_different_model",
-   "torch_compile_tutorial_",
-   "recipes/dynamic_quantization",
-   "recipes/saving_and_loading_a_general_checkpoint",
-   "recipes/benchmark",
-   "recipes/tuning_guide",
-   "recipes/zeroing_out_gradients",
-   "recipes/defining_a_neural_network",
-   "recipes/timer_quick_start",
-   "recipes/amp_recipe",
-   "recipes/Captum_Recipe",
-   "flask_rest_api_tutorial",
-   "text_to_speech_with_torchaudio",
+   "beginner_source/basics/intro", # no code
+   "beginner_source/translation_transformer",
+   "beginner_source/profiler",
+   "beginner_source/saving_loading_models",
+   "beginner_source/introyt/captumyt",
+   "beginner_source/examples_nn/polynomial_module",
+   "beginner_source/examples_nn/dynamic_net",
+   "beginner_source/examples_nn/polynomial_optim",
+   "beginner_source/former_torchies/autograd_tutorial_old",
+   "beginner_source/former_torchies/tensor_tutorial_old",
+   "beginner_source/examples_autograd/polynomial_autograd",
+   "beginner_source/examples_autograd/polynomial_custom_function",
+   "intermediate_source/parametrizations",
+   "intermediate_source/mnist_train_nas", # used by ax_multiobjective_nas_tutorial.py
+   "intermediate_source/fx_conv_bn_fuser",
+   "advanced_source/super_resolution_with_onnxruntime",
+   "advanced_source/ddp_pipeline", # requires 4 gpus
+   "prototype_source/fx_graph_mode_ptq_dynamic",
+   "prototype_source/vmap_recipe",
+   "prototype_source/torchscript_freezing",
+   "prototype_source/nestedtensor",
+   "recipes_source/recipes/saving_and_loading_models_for_inference",
+   "recipes_source/recipes/saving_multiple_models_in_one_file",
+   "recipes_source/recipes/loading_data_recipe",
+   "recipes_source/recipes/tensorboard_with_pytorch",
+   "recipes_source/recipes/what_is_state_dict",
+   "recipes_source/recipes/profiler_recipe",
+   "recipes_source/recipes/save_load_across_devices",
+   "recipes_source/recipes/warmstarting_model_using_parameters_from_a_different_model",
+   "recipes_source/recipes/dynamic_quantization",
+   "recipes_source/recipes/saving_and_loading_a_general_checkpoint",
+   "recipes_source/recipes/benchmark",
+   "recipes_source/recipes/tuning_guide",
+   "recipes_source/recipes/zeroing_out_gradients",
+   "recipes_source/recipes/defining_a_neural_network",
+   "recipes_source/recipes/timer_quick_start",
+   "recipes_source/recipes/amp_recipe",
+   "recipes_source/recipes/Captum_Recipe",
+   "intermediate_source/flask_rest_api_tutorial",
+   "intermediate_source/text_to_speech_with_torchaudio",
+   "intermediate_source/tensorboard_profiler_tutorial" # reenable after 2.0 release.
]

-
def tutorial_source_dirs() -> List[Path]:
    return [
        p.relative_to(REPO_ROOT).with_name(p.stem[:-7])
@@ -68,6 +66,7 @@ def main() -> None:
        glob_path = f"{tutorial_source_dir}/**/*.html"
        html_file_paths += docs_dir.glob(glob_path)

+   should_not_run = [f'{x.replace("_source", "")}.html' for x in NOT_RUN]
    did_not_run = []
    for html_file_path in html_file_paths:
        with open(html_file_path, "r", encoding="utf-8") as html_file:
@@ -78,9 +77,7 @@
            if (
                "Total running time of the script: ( 0 minutes 0.000 seconds)"
                in elem.text
-               and not any(
-                   html_file_path.match(file) for file in NOT_RUN
-               )
+               and not any(html_file_path.match(file) for file in should_not_run)
            ):
                did_not_run.append(html_file_path.as_posix())
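Because `NOT_RUN` now holds repository-relative source paths, `should_not_run` strips the `_source` suffix so the entries line up with the generated HTML paths; for example:

```python
# Illustration of the path rewrite used to match NOT_RUN entries against the
# built HTML files (the entry is taken from the updated list above).
entry = "intermediate_source/flask_rest_api_tutorial"
print(f'{entry.replace("_source", "")}.html')  # intermediate/flask_rest_api_tutorial.html
```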

_static/img/invpendulum.gif

29.6 KB

advanced_source/static_quantization_tutorial.rst

Lines changed: 5 additions & 3 deletions
@@ -458,7 +458,8 @@ quantizing for x86 architectures. This configuration does the following:
    per_channel_quantized_model = load_model(saved_model_dir + float_model_file)
    per_channel_quantized_model.eval()
    per_channel_quantized_model.fuse_model()
-   per_channel_quantized_model.qconfig = torch.ao.quantization.get_default_qconfig('fbgemm')
+   # The old 'fbgemm' is still available but 'x86' is the recommended default.
+   per_channel_quantized_model.qconfig = torch.ao.quantization.get_default_qconfig('x86')
    print(per_channel_quantized_model.qconfig)

    torch.ao.quantization.prepare(per_channel_quantized_model, inplace=True)
@@ -534,8 +535,9 @@ We fuse modules as before
    qat_model = load_model(saved_model_dir + float_model_file)
    qat_model.fuse_model()

-   optimizer = torch.optim.SGD(qat_model.parameters(), lr = 0.0001)
-   qat_model.qconfig = torch.ao.quantization.get_default_qat_qconfig('fbgemm')
+   optimizer = torch.optim.SGD(qat_model.parameters(), lr = 0.0001)
+   # The old 'fbgemm' is still available but 'x86' is the recommended default.
+   qat_model.qconfig = torch.ao.quantization.get_default_qat_qconfig('x86')

Finally, ``prepare_qat`` performs the "fake quantization", preparing the model for quantization-aware training
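For readers updating their own code, a quick standalone check of the new backend choice (a minimal sketch, assuming a PyTorch build recent enough to ship the 'x86' quantization backend, as the updated tutorial requires):

```python
# Print the default qconfigs the updated tutorial now selects. Per the diff,
# 'fbgemm' is still available, but 'x86' is the recommended default.
import torch.ao.quantization as tq

print(tq.get_default_qconfig("x86"))
print(tq.get_default_qat_qconfig("x86"))
```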
