From 4d6f71b12659c80233649d4041a30c1f2978a5d2 Mon Sep 17 00:00:00 2001
From: Catherine Lee
Date: Wed, 26 Apr 2023 10:22:27 -0700
Subject: [PATCH] switch to v100

---
 .circleci/config.yml         | 4 ++--
 .circleci/regenerate.py      | 8 ++++----
 .jenkins/get_files_to_run.py | 4 ++--
 .jenkins/metadata.json       | 2 +-
 4 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 38bbfb83818..78e1e9b117f 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -204,7 +204,7 @@ jobs:
     resource_class: gpu.nvidia.small.multi
   pytorch_tutorial_pr_build_worker_1:
     <<: *pytorch_tutorial_build_worker_defaults
-    resource_class: gpu.nvidia.medium
+    resource_class: gpu.nvidia.large
   pytorch_tutorial_pr_build_worker_10:
     <<: *pytorch_tutorial_build_worker_defaults
   pytorch_tutorial_pr_build_worker_11:
@@ -249,7 +249,7 @@ jobs:
     resource_class: gpu.nvidia.small.multi
   pytorch_tutorial_trunk_build_worker_1:
     <<: *pytorch_tutorial_build_worker_defaults
-    resource_class: gpu.nvidia.medium
+    resource_class: gpu.nvidia.large
   pytorch_tutorial_trunk_build_worker_10:
     <<: *pytorch_tutorial_build_worker_defaults
   pytorch_tutorial_trunk_build_worker_11:
diff --git a/.circleci/regenerate.py b/.circleci/regenerate.py
index 42da5c34f52..f47ee1dfa6f 100644
--- a/.circleci/regenerate.py
+++ b/.circleci/regenerate.py
@@ -27,10 +27,10 @@ def jobs(pr_or_trunk, num_workers=20, indentation=2):
     jobs = {}
 
     # all tutorials that need gpu.nvidia.small.multi machines will be routed by
-    # get_files_to_run.py to 0th worker, similarly for gpu.nvidia.medium and the
+    # get_files_to_run.py to 0th worker, similarly for gpu.nvidia.large and the
     # 1st worker
     needs_gpu_nvidia_small_multi = [0]
-    needs_gpu_nvidia_medium = [1]
+    needs_gpu_nvidia_large = [1]
     jobs[f"pytorch_tutorial_{pr_or_trunk}_build_manager"] = {
         "<<": "*pytorch_tutorial_build_manager_defaults"
     }
@@ -38,8 +38,8 @@ def jobs(pr_or_trunk, num_workers=20, indentation=2):
         job_info = {"<<": "*pytorch_tutorial_build_worker_defaults"}
         if i in needs_gpu_nvidia_small_multi:
             job_info["resource_class"] = "gpu.nvidia.small.multi"
-        if i in needs_gpu_nvidia_medium:
-            job_info["resource_class"] = "gpu.nvidia.medium"
+        if i in needs_gpu_nvidia_large:
+            job_info["resource_class"] = "gpu.nvidia.large"
         jobs[f"pytorch_tutorial_{pr_or_trunk}_build_worker_{i}"] = job_info
 
     return indent(indentation, jobs).replace("'", "")
diff --git a/.jenkins/get_files_to_run.py b/.jenkins/get_files_to_run.py
index fc5d4310ac7..ae04d387b46 100644
--- a/.jenkins/get_files_to_run.py
+++ b/.jenkins/get_files_to_run.py
@@ -44,7 +44,7 @@ def add_to_shard(i, filename):
         filter(lambda x: get_needs_machine(x) == "gpu.nvidia.small.multi", all_files,)
     )
     needs_gpu_nvidia_medium = list(
-        filter(lambda x: get_needs_machine(x) == "gpu.nvidia.medium", all_files,)
+        filter(lambda x: get_needs_machine(x) == "gpu.nvidia.large", all_files,)
     )
     for filename in needs_gpu_nvidia_small_multi:
         # currently, the only job that uses gpu.nvidia.small.multi is the 0th worker,
@@ -52,7 +52,7 @@ def add_to_shard(i, filename):
         add_to_shard(0, filename)
         all_other_files.remove(filename)
     for filename in needs_gpu_nvidia_medium:
-        # currently, the only job that uses gpu.nvidia.medium is the 1st worker,
+        # currently, the only job that uses gpu.nvidia.large is the 1st worker,
         # so we'll add all the jobs that need this machine to the 1st worker
         add_to_shard(1, filename)
         all_other_files.remove(filename)
diff --git a/.jenkins/metadata.json b/.jenkins/metadata.json
index 9a881ccfa88..40c0e13c74e 100644
--- a/.jenkins/metadata.json
+++ b/.jenkins/metadata.json
@@ -26,6 +26,6 @@
     "needs": "gpu.nvidia.small.multi"
   },
   "intermediate_source/torch_compile_tutorial.py": {
-    "needs": "gpu.nvidia.medium"
+    "needs": "gpu.nvidia.large"
   }
 }
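For context, the routing scheme this patch adjusts: .jenkins/get_files_to_run.py pins any tutorial whose metadata.json entry declares a special resource class to a fixed worker (shard 0 for gpu.nvidia.small.multi, shard 1 for the class this patch renames to gpu.nvidia.large), then distributes everything else across the remaining workers. The sketch below is a minimal illustration of that idea only, not the repository's actual code: NUM_SHARDS, PINNED_SHARDS, and the sample METADATA dict are assumed names, and the round-robin spread of the unpinned files stands in for whatever balancing the real script does.

    from typing import List, Optional

    # Assumed values for illustration; not taken from the patch.
    NUM_SHARDS = 20

    # Resource class -> worker reserved for it. After this patch the 1st
    # worker serves gpu.nvidia.large instead of gpu.nvidia.medium.
    PINNED_SHARDS = {
        "gpu.nvidia.small.multi": 0,
        "gpu.nvidia.large": 1,
    }

    # Hypothetical stand-in for the contents of .jenkins/metadata.json.
    METADATA = {
        "intermediate_source/torch_compile_tutorial.py": {"needs": "gpu.nvidia.large"},
    }

    def get_needs_machine(filename: str) -> Optional[str]:
        # Return the declared resource class, or None for ordinary tutorials.
        return METADATA.get(filename, {}).get("needs")

    def shard_files(all_files: List[str]) -> List[List[str]]:
        shards: List[List[str]] = [[] for _ in range(NUM_SHARDS)]
        remaining = []
        for filename in all_files:
            pinned = PINNED_SHARDS.get(get_needs_machine(filename))
            if pinned is not None:
                # Route to the worker reserved for this resource class.
                shards[pinned].append(filename)
            else:
                remaining.append(filename)
        # Spread the rest round-robin (the real script's balancing may differ).
        for i, filename in enumerate(remaining):
            shards[i % NUM_SHARDS].append(filename)
        return shards

    if __name__ == "__main__":
        files = ["intermediate_source/torch_compile_tutorial.py", "a.py", "b.py"]
        for i, shard in enumerate(shard_files(files)):
            if shard:
                print(i, shard)

Under these assumptions, torch_compile_tutorial.py always lands on worker 1, which is why only that worker's resource_class needed to change in .circleci/config.yml.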