diff --git a/advanced_source/rpc_ddp_tutorial/main.py b/advanced_source/rpc_ddp_tutorial/main.py
index f83384d0a8d..3d0d6ba2219 100644
--- a/advanced_source/rpc_ddp_tutorial/main.py
+++ b/advanced_source/rpc_ddp_tutorial/main.py
@@ -6,7 +6,7 @@
 import torch.distributed as dist
 import torch.distributed.autograd as dist_autograd
 import torch.distributed.rpc as rpc
-from torch.distributed.rpc import ProcessGroupRpcBackendOptions
+from torch.distributed.rpc import TensorPipeRpcBackendOptions
 import torch.multiprocessing as mp
 import torch.optim as optim
 from torch.distributed.optim import DistributedOptimizer
@@ -128,7 +128,7 @@ def run_worker(rank, world_size):
     os.environ['MASTER_PORT'] = '29500'
 
 
-    rpc_backend_options = ProcessGroupRpcBackendOptions()
+    rpc_backend_options = TensorPipeRpcBackendOptions()
     rpc_backend_options.init_method='tcp://localhost:29501'
 
     # Rank 2 is master, 3 is ps and 0 and 1 are trainers.
diff --git a/intermediate_source/dist_pipeline_parallel_tutorial.rst b/intermediate_source/dist_pipeline_parallel_tutorial.rst
index 693043478fb..2c8cc730258 100644
--- a/intermediate_source/dist_pipeline_parallel_tutorial.rst
+++ b/intermediate_source/dist_pipeline_parallel_tutorial.rst
@@ -316,7 +316,7 @@ where the ``shutdown`` by default will block until all RPC participants finish.
     def run_worker(rank, world_size, num_split):
         os.environ['MASTER_ADDR'] = 'localhost'
         os.environ['MASTER_PORT'] = '29500'
-        options = rpc.ProcessGroupRpcBackendOptions(num_send_recv_threads=128)
+        options = rpc.TensorPipeRpcBackendOptions(num_worker_threads=128)
 
         if rank == 0:
             rpc.init_rpc(
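
For context, a minimal sketch of how the replacement backend options plug into rpc.init_rpc. It is not taken verbatim from either tutorial: the "worker{rank}" naming, world_size=2, and the mp.spawn launcher are illustrative assumptions, while the init_method address and num_worker_threads=128 mirror the diff above.

# Minimal usage sketch for TensorPipeRpcBackendOptions (assumption: worker
# names, world_size, and the mp.spawn launch below are illustrative only).
import os

import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
from torch.distributed.rpc import TensorPipeRpcBackendOptions


def run_worker(rank, world_size):
    # Kept to mirror the tutorial code; init_rpc itself uses the explicit
    # init_method passed via the backend options below.
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '29500'

    # TensorPipeRpcBackendOptions replaces ProcessGroupRpcBackendOptions;
    # num_worker_threads is the TensorPipe counterpart of num_send_recv_threads.
    options = TensorPipeRpcBackendOptions(
        num_worker_threads=128,
        init_method='tcp://localhost:29501',
    )

    rpc.init_rpc(
        name=f"worker{rank}",
        rank=rank,
        world_size=world_size,
        rpc_backend_options=options,
    )

    # ... issue rpc.rpc_sync / rpc.rpc_async / rpc.remote calls here ...

    rpc.shutdown()


if __name__ == "__main__":
    world_size = 2
    mp.spawn(run_worker, args=(world_size,), nprocs=world_size, join=True)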