Skip to content

Commit 35125b1

Browse files
authored: switch to v100 (#2301)
1 parent 417d1af commit 35125b1

File tree

4 files changed

+9
-9
lines changed

4 files changed

+9
-9
lines changed

.circleci/config.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -204,7 +204,7 @@ jobs:
204204
resource_class: gpu.nvidia.small.multi
205205
pytorch_tutorial_pr_build_worker_1:
206206
<<: *pytorch_tutorial_build_worker_defaults
207-
resource_class: gpu.nvidia.medium
207+
resource_class: gpu.nvidia.large
208208
pytorch_tutorial_pr_build_worker_10:
209209
<<: *pytorch_tutorial_build_worker_defaults
210210
pytorch_tutorial_pr_build_worker_11:
@@ -249,7 +249,7 @@ jobs:
249249
resource_class: gpu.nvidia.small.multi
250250
pytorch_tutorial_trunk_build_worker_1:
251251
<<: *pytorch_tutorial_build_worker_defaults
252-
resource_class: gpu.nvidia.medium
252+
resource_class: gpu.nvidia.large
253253
pytorch_tutorial_trunk_build_worker_10:
254254
<<: *pytorch_tutorial_build_worker_defaults
255255
pytorch_tutorial_trunk_build_worker_11:

.circleci/regenerate.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -27,19 +27,19 @@ def jobs(pr_or_trunk, num_workers=20, indentation=2):
2727
jobs = {}
2828

2929
# all tutorials that need gpu.nvidia.small.multi machines will be routed by
30-
# get_files_to_run.py to 0th worker, similarly for gpu.nvidia.medium and the
30+
# get_files_to_run.py to 0th worker, similarly for gpu.nvidia.large and the
3131
# 1st worker
3232
needs_gpu_nvidia_small_multi = [0]
33-
needs_gpu_nvidia_medium = [1]
33+
needs_gpu_nvidia_large = [1]
3434
jobs[f"pytorch_tutorial_{pr_or_trunk}_build_manager"] = {
3535
"<<": "*pytorch_tutorial_build_manager_defaults"
3636
}
3737
for i in range(num_workers):
3838
job_info = {"<<": "*pytorch_tutorial_build_worker_defaults"}
3939
if i in needs_gpu_nvidia_small_multi:
4040
job_info["resource_class"] = "gpu.nvidia.small.multi"
41-
if i in needs_gpu_nvidia_medium:
42-
job_info["resource_class"] = "gpu.nvidia.medium"
41+
if i in needs_gpu_nvidia_large:
42+
job_info["resource_class"] = "gpu.nvidia.large"
4343
jobs[f"pytorch_tutorial_{pr_or_trunk}_build_worker_{i}"] = job_info
4444

4545
return indent(indentation, jobs).replace("'", "")

.jenkins/get_files_to_run.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -44,15 +44,15 @@ def add_to_shard(i, filename):
4444
filter(lambda x: get_needs_machine(x) == "gpu.nvidia.small.multi", all_files,)
4545
)
4646
needs_gpu_nvidia_medium = list(
47-
filter(lambda x: get_needs_machine(x) == "gpu.nvidia.medium", all_files,)
47+
filter(lambda x: get_needs_machine(x) == "gpu.nvidia.large", all_files,)
4848
)
4949
for filename in needs_gpu_nvidia_small_multi:
5050
# currently, the only job that uses gpu.nvidia.small.multi is the 0th worker,
5151
# so we'll add all the jobs that need this machine to the 0th worker
5252
add_to_shard(0, filename)
5353
all_other_files.remove(filename)
5454
for filename in needs_gpu_nvidia_medium:
55-
# currently, the only job that uses gpu.nvidia.medium is the 1st worker,
55+
# currently, the only job that uses gpu.nvidia.large is the 1st worker,
5656
# so we'll add all the jobs that need this machine to the 1st worker
5757
add_to_shard(1, filename)
5858
all_other_files.remove(filename)

.jenkins/metadata.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,6 @@
2626
"needs": "gpu.nvidia.small.multi"
2727
},
2828
"intermediate_source/torch_compile_tutorial.py": {
29-
"needs": "gpu.nvidia.medium"
29+
"needs": "gpu.nvidia.large"
3030
}
3131
}

0 commit comments

Comments (0)