Skip to content

Commit ed493d1

Browse files
author
Svetlana Karslioglu
authored
Merge branch 'main' into suraj813-patch-1
2 parents 695a492 + f42a7ab commit ed493d1

33 files changed (+129, −3745 lines)

.circleci/config.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -190,6 +190,7 @@ jobs:
190190
resource_class: gpu.nvidia.small.multi
191191
pytorch_tutorial_pr_build_worker_1:
192192
<<: *pytorch_tutorial_build_worker_defaults
193+
resource_class: gpu.nvidia.medium
193194
pytorch_tutorial_pr_build_worker_10:
194195
<<: *pytorch_tutorial_build_worker_defaults
195196
pytorch_tutorial_pr_build_worker_11:
@@ -234,6 +235,7 @@ jobs:
234235
resource_class: gpu.nvidia.small.multi
235236
pytorch_tutorial_trunk_build_worker_1:
236237
<<: *pytorch_tutorial_build_worker_defaults
238+
resource_class: gpu.nvidia.medium
237239
pytorch_tutorial_trunk_build_worker_10:
238240
<<: *pytorch_tutorial_build_worker_defaults
239241
pytorch_tutorial_trunk_build_worker_11:

.circleci/regenerate.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,16 +26,20 @@ def indent(indentation, data_list):
2626
def jobs(pr_or_trunk, num_workers=20, indentation=2):
2727
jobs = {}
2828

29-
# all tutorials that need gpu.nvidia.small.multi machines will be routed
30-
# by get_files_to_run.py to 0th worker
29+
# all tutorials that need gpu.nvidia.small.multi machines will be routed by
30+
# get_files_to_run.py to 0th worker, similarly for gpu.nvidia.medium and the
31+
# 1st worker
3132
needs_gpu_nvidia_small_multi = [0]
33+
needs_gpu_nvidia_medium = [1]
3234
jobs[f"pytorch_tutorial_{pr_or_trunk}_build_manager"] = {
3335
"<<": "*pytorch_tutorial_build_manager_defaults"
3436
}
3537
for i in range(num_workers):
3638
job_info = {"<<": "*pytorch_tutorial_build_worker_defaults"}
3739
if i in needs_gpu_nvidia_small_multi:
3840
job_info["resource_class"] = "gpu.nvidia.small.multi"
41+
if i in needs_gpu_nvidia_medium:
42+
job_info["resource_class"] = "gpu.nvidia.medium"
3943
jobs[f"pytorch_tutorial_{pr_or_trunk}_build_worker_{i}"] = job_info
4044

4145
return indent(indentation, jobs).replace("'", "")

.jenkins/get_files_to_run.py

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -39,15 +39,23 @@ def add_to_shard(i, filename):
3939
shard_jobs,
4040
)
4141

42+
all_other_files = all_files.copy()
4243
needs_gpu_nvidia_small_multi = list(
4344
filter(lambda x: get_needs_machine(x) == "gpu.nvidia.small.multi", all_files,)
4445
)
46+
needs_gpu_nvidia_medium = list(
47+
filter(lambda x: get_needs_machine(x) == "gpu.nvidia.medium", all_files,)
48+
)
4549
for filename in needs_gpu_nvidia_small_multi:
4650
# currently, the only job that uses gpu.nvidia.small.multi is the 0th worker,
4751
# so we'll add all the jobs that need this machine to the 0th worker
4852
add_to_shard(0, filename)
49-
50-
all_other_files = [x for x in all_files if x not in needs_gpu_nvidia_small_multi]
53+
all_other_files.remove(filename)
54+
for filename in needs_gpu_nvidia_medium:
55+
# currently, the only job that uses gpu.nvidia.medium is the 1st worker,
56+
# so we'll add all the jobs that need this machine to the 1st worker
57+
add_to_shard(1, filename)
58+
all_other_files.remove(filename)
5159

5260
sorted_files = sorted(all_other_files, key=get_duration, reverse=True,)
5361

.jenkins/metadata.json

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,5 +24,8 @@
2424
},
2525
"intermediate_source/model_parallel_tutorial.py": {
2626
"needs": "gpu.nvidia.small.multi"
27+
},
28+
"intermediate_source/torch_compile_tutorial.py": {
29+
"needs": "gpu.nvidia.medium"
2730
}
2831
}

.jenkins/validate_tutorials_built.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,6 @@
1414
"profiler",
1515
"saving_loading_models",
1616
"introyt/captumyt",
17-
"introyt/trainingyt",
1817
"examples_nn/polynomial_module",
1918
"examples_nn/dynamic_net",
2019
"examples_nn/polynomial_optim",

0 commit comments

Comments (0)