diff --git a/advanced_source/coding_ddpg.py b/advanced_source/coding_ddpg.py
index 7486b6e0e94..0aefc716931 100644
--- a/advanced_source/coding_ddpg.py
+++ b/advanced_source/coding_ddpg.py
@@ -63,16 +63,25 @@
 # %%bash
 # pip3 install torchrl mujoco glfw
 
-import torchrl
-import torch
-import tqdm
-from typing import Tuple
-
 # sphinx_gallery_start_ignore
 import warnings
 warnings.filterwarnings("ignore")
+import multiprocessing
+# TorchRL prefers the spawn start method, which restricts ``~torchrl.envs.ParallelEnv``
+# creation to the ``__main__`` guard. For ease of reading, this tutorial switches
+# to fork, which is also the default start method in Google's Colaboratory.
+try:
+    multiprocessing.set_start_method("fork")
+except RuntimeError:
+    assert multiprocessing.get_start_method() == "fork"
 # sphinx_gallery_end_ignore
+
+import torchrl
+import torch
+import tqdm
+from typing import Tuple
+
 
 ###############################################################################
 # We will execute the policy on CUDA if available
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@@ -1219,6 +1228,6 @@ def ceil_div(x, y):
 #
 # To iterate further on this loss module we might consider:
 #
-# - Using `@dispatch` (see `[Feature] Distpatch IQL loss module `_.
+# - Using `@dispatch` (see `[Feature] Distpatch IQL loss module `_.)
 # - Allowing flexible TensorDict keys.
 #
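
As background for the start-method comment added above: under spawn, each child process re-imports the main module, so objects that launch workers (such as ``ParallelEnv``) must be constructed inside the ``__main__`` guard. A minimal sketch of that spawn-safe alternative, where the environment name and worker count are illustrative choices, not taken from the patch:

import multiprocessing

from torchrl.envs import GymEnv, ParallelEnv

if __name__ == "__main__":
    # With spawn, every worker re-imports this module from the top, so any
    # process-spawning object must only be created under this guard.
    multiprocessing.set_start_method("spawn")
    env = ParallelEnv(2, lambda: GymEnv("Pendulum-v1"))  # illustrative env/worker count
    print(env.rollout(3))
    env.close()

Switching to fork, as the patch does, avoids this constraint because forked workers inherit the parent's state instead of re-importing the module, which keeps the tutorial's top-level code straightforward.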