diff --git a/.jenkins/get_files_to_run.py b/.jenkins/get_files_to_run.py index 6ea5dea37b4..72e859c8e8f 100644 --- a/.jenkins/get_files_to_run.py +++ b/.jenkins/get_files_to_run.py @@ -46,6 +46,9 @@ def add_to_shard(i, filename): needs_gpu_nvidia_medium = list( filter(lambda x: get_needs_machine(x) == "gpu.nvidia.large", all_files,) ) + needs_a10g = list( + filter(lambda x: get_needs_machine(x) == "linux.g5.4xlarge.nvidia.gpu", all_files,) + ) for filename in needs_gpu_nvidia_small_multi: # currently, the only job that uses gpu.nvidia.small.multi is the 0th worker, # so we'll add all the jobs that need this machine to the 0th worker @@ -56,6 +59,11 @@ def add_to_shard(i, filename): # so we'll add all the jobs that need this machine to the 1st worker add_to_shard(1, filename) all_other_files.remove(filename) + for filename in needs_a10g: + # currently, workers 2-5 use linux.g5.4xlarge.nvidia.gpu, so, arbitrarily, + # we'll add all the jobs that need this machine to the 5th worker + add_to_shard(5, filename) + all_other_files.remove(filename) sorted_files = sorted(all_other_files, key=get_duration, reverse=True,) diff --git a/.jenkins/metadata.json b/.jenkins/metadata.json index ef793f0b1db..c537c85a60b 100644 --- a/.jenkins/metadata.json +++ b/.jenkins/metadata.json @@ -30,5 +30,8 @@ }, "intermediate_source/torch_compile_tutorial.py": { "needs": "gpu.nvidia.large" + }, + "intermediate_source/scaled_dot_product_attention_tutorial.py": { + "needs": "linux.g5.4xlarge.nvidia.gpu" } }