Commit 425a694

SyncDataCollector
1 parent b878abd

intermediate_source/coding_ddpg.py

Lines changed: 10 additions & 12 deletions
@@ -808,15 +808,16 @@ def make_ddpg_actor(
 # GPU, number of workers, and so on).
 #
 # Here we will use
-# :class:`~torchrl.collectors.MultiaSyncDataCollector`, a data collector that
-# will be executed in an asynchronous manner (for example, data will be collected while
-# the policy is being optimized). With the :class:`MultiaSyncDataCollector`,
-# multiple workers are running rollouts separately. When a batch is asked, it
-# is gathered from the first worker that can provide it.
+# :class:`~torchrl.collectors.SyncDataCollector`, a simple, single-process
+# data collector. TorchRL offers other collectors, such as
+# :class:`~torchrl.collectors.MultiaSyncDataCollector`, which executes the
+# rollouts in an asynchronous manner (for example, data will be collected while
+# the policy is being optimized, thereby decoupling the training and
+# data collection).
 #
 # The parameters to specify are:
 #
-# - the list of environment creation functions,
+# - an environment factory or an environment,
 # - the policy,
 # - the total number of frames before the collector is considered empty,
 # - the maximum number of frames per trajectory (useful for non-terminating
@@ -854,14 +855,11 @@ def make_ddpg_actor(
 init_random_frames = 5000
 num_collectors = 2
 
-from torchrl.collectors import MultiaSyncDataCollector
+from torchrl.collectors import SyncDataCollector
 from torchrl.envs import ExplorationType
 
-collector = MultiaSyncDataCollector(
-    create_env_fn=[
-        parallel_env,
-    ]
-    * num_collectors,
+collector = SyncDataCollector(
+    parallel_env,
     policy=actor_model_explore,
     total_frames=total_frames,
     frames_per_batch=frames_per_batch,
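
For reference, here is a minimal sketch of how the SyncDataCollector introduced by this commit is consumed. It is not part of the commit: GymEnv("Pendulum-v1") is an illustrative assumption (it requires a gym/gymnasium backend), and policy=None is used so the collector falls back to a random policy drawn from the environment's action spec.

# Minimal sketch, not from the commit: drive a SyncDataCollector to exhaustion.
from torchrl.collectors import SyncDataCollector
from torchrl.envs import GymEnv

env = GymEnv("Pendulum-v1")  # assumption: any TorchRL environment works here
collector = SyncDataCollector(
    env,                    # an environment instance (or an environment factory)
    policy=None,            # None falls back to a random policy over the action spec
    total_frames=1_000,     # the collector is exhausted after this many frames
    frames_per_batch=200,   # each yielded TensorDict carries this many transitions
)
for batch in collector:
    print(batch.shape)      # torch.Size([200]): one batch of collected transitions
collector.shutdown()

Unlike MultiaSyncDataCollector, the synchronous collector runs its rollouts in the training process, which keeps the tutorial simpler at the cost of not overlapping data collection with policy optimization.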
