multi_agent_pendulum.py
"""Simple example of setting up an agent-to-module mapping function.
How to run this script
----------------------
`python [script file name].py --enable-new-api-stack --num-agents=2`
Control the number of agents and policies (RLModules) via --num-agents and
--num-policies.
For debugging, use the following additional command line options
`--no-tune --num-env-runners=0`
which should allow you to set breakpoints anywhere in the RLlib code and
have the execution stop there for inspection and debugging.
For logging to your WandB account, use:
`--wandb-key=[your WandB API key] --wandb-project=[some project name]
--wandb-run-name=[optional: WandB run name (within the defined project)]`
"""
from ray.rllib.core.rl_module.default_model_config import DefaultModelConfig
from ray.rllib.examples.envs.classes.multi_agent import MultiAgentPendulum
from ray.rllib.utils.test_utils import (
    add_rllib_example_script_args,
    run_rllib_example_script_experiment,
)
from ray.tune.registry import get_trainable_cls, register_env

parser = add_rllib_example_script_args(
    default_iters=200,
    default_timesteps=100000,
    default_reward=-400.0,
)
# TODO (sven): This arg is currently ignored (hard-set to 2).
parser.add_argument("--num-policies", type=int, default=2)


if __name__ == "__main__":
    args = parser.parse_args()

    # Register our environment with tune.
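    # MultiAgentPendulum wraps `num_agents` independent Pendulum-v1 copies;
    # agent IDs are the integers 0 through num_agents - 1.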
    if args.num_agents > 0:
        register_env(
            "env",
            lambda _: MultiAgentPendulum(config={"num_agents": args.num_agents}),
        )
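
    # Build the algorithm config. Note that `lambda_` and `vf_clip_param`
    # are PPO-family settings (PPO is the default --algo in these example
    # scripts); other algorithms may not accept them.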
    base_config = (
        get_trainable_cls(args.algo)
        .get_default_config()
        .environment("env" if args.num_agents > 0 else "Pendulum-v1")
        .training(
            train_batch_size_per_learner=512,
            minibatch_size=64,
            lambda_=0.1,
            gamma=0.95,
            lr=0.0003,
            vf_clip_param=10.0,
        )
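        # On the new API stack, the model is configured via the RLModule
        # spec below (DefaultModelConfig) instead of the old `model` dict.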
        .rl_module(
            model_config=DefaultModelConfig(fcnet_activation="relu"),
        )
    )
    # Add a simple multi-agent setup.
    if args.num_agents > 0:
        base_config.multi_agent(
            policies={f"p{i}" for i in range(args.num_agents)},
            policy_mapping_fn=lambda aid, *a, **kw: f"p{aid}",
        )
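    # With --num-agents=2, for example, the mapping above sends agent 0 to
    # RLModule "p0" and agent 1 to "p1" (one module per agent).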
    # Run the experiment (with Tune, unless --no-tune is set).
    run_rllib_example_script_experiment(base_config, args)