trictrac/bot/src/burnrl/main.rs

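//! Entry point for training and demoing burn-rl agents (DQN, SAC, PPO) on the
//! Trictrac environment. Usage (inferred from the argument parsing below):
//! pass the algorithm name as the first CLI argument, one of
//! dqn, dqn_big, dqn_valid, sac, ppo.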
use bot::burnrl::environment::TrictracEnvironment;
use bot::burnrl::environment_big::TrictracEnvironment as TrictracEnvironmentBig;
use bot::burnrl::environment_valid::TrictracEnvironment as TrictracEnvironmentValid;
use bot::burnrl::utils::{demo_model, Config};
use bot::burnrl::{dqn_big_model, dqn_model, dqn_valid_model, ppo_model, sac_model};
use burn::backend::{Autodiff, NdArray};
use burn_rl::base::ElemType;
use std::env;
type Backend = Autodiff<NdArray<ElemType>>;

fn main() {
    let args: Vec<String> = env::args().collect();
    let algo = args
        .get(1)
        .expect("missing algorithm argument: dqn, dqn_big, dqn_valid, sac or ppo");
    // let dir_path = &args[2];
    let path = format!("bot/models/burnrl_{algo}");
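    // e.g. algo = "dqn" gives path "bot/models/burnrl_dqn", used below to save
    // and reload the trained model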
    let conf = Config {
        save_path: Some(path.clone()),
        num_episodes: 30, // 40
        max_steps: 1000,  // 1000 max steps per episode
        dense_size: 256,  // 128 neural network capacity (default 128)
        gamma: 0.9999,    // 0.999 discount factor; higher = favors long-term strategies
        tau: 0.0005,      // 0.005 soft update rate of the target network; lower = slower
        // adaptation, less sensitive to lucky streaks
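        // (soft update assumed to take the usual form for DQN target networks:
        // target_weights = tau * policy_weights + (1 - tau) * target_weights)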
        learning_rate: 0.001, // 0.001 step size; too low = slow training, too high = may
        // never converge
        batch_size: 128, // 32 number of past experiences sampled to compute the mean error
        clip_grad: 70.0, // 100 upper bound on gradient corrections (default 100)
        // SAC
        min_probability: 1e-9,
        // DQN
        eps_start: 0.9, // 0.9 initial epsilon value (0.9 => more exploration)
        eps_end: 0.05,  // 0.05
        // higher eps_decay = epsilon decreases more slowly
        // used in: epsilon = eps_end + (eps_start - eps_end) * e^(-step / eps_decay)
        // epsilon is updated at the start of each episode
        eps_decay: 2000.0, // 1000 ?
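        // worked example, derived from the formula above with these values
        // (eps_start = 0.9, eps_end = 0.05, eps_decay = 2000.0):
        // epsilon ≈ 0.57 after 1000 steps and ≈ 0.36 after 2000 steps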
        // PPO
        lambda: 0.95,
        epsilon_clip: 0.2,
        critic_weight: 0.5,
        entropy_weight: 0.01,
        epochs: 8,
    };
    println!("{conf}----------");
    match algo.as_str() {
        "dqn" => {
            let _agent = dqn_model::run::<TrictracEnvironment, Backend>(&conf, false);
println!("> Chargement du modèle pour test");
let loaded_model = dqn_model::load_model(conf.dense_size, &path);
let loaded_agent: burn_rl::agent::DQN<TrictracEnvironment, _, _> =
burn_rl::agent::DQN::new(loaded_model.unwrap());
2025-08-20 13:09:57 +02:00
2025-08-21 11:30:25 +02:00
println!("> Test avec le modèle chargé");
demo_model(loaded_agent);
}
"dqn_big" => {
2025-08-21 14:35:25 +02:00
let _agent = dqn_big_model::run::<TrictracEnvironmentBig, Backend>(&conf, false);
2025-08-21 11:30:25 +02:00
}
"dqn_valid" => {
2025-08-21 14:35:25 +02:00
let _agent = dqn_valid_model::run::<TrictracEnvironmentValid, Backend>(&conf, false);
2025-08-21 11:30:25 +02:00
}
"sac" => {
2025-08-21 14:35:25 +02:00
            let _agent = sac_model::run::<TrictracEnvironment, Backend>(&conf, false);
            println!("> Loading the model for testing");
            let loaded_model = sac_model::load_model(conf.dense_size, &path);
            let loaded_agent: burn_rl::agent::SAC<TrictracEnvironment, _, _> =
                burn_rl::agent::SAC::new(loaded_model.unwrap());
            println!("> Testing with the loaded model");
            demo_model(loaded_agent);
        }
        "ppo" => {
            let _agent = ppo_model::run::<TrictracEnvironment, Backend>(&conf, false);
            println!("> Loading the model for testing");
            let loaded_model = ppo_model::load_model(conf.dense_size, &path);
            let loaded_agent: burn_rl::agent::PPO<TrictracEnvironment, _, _> =
                burn_rl::agent::PPO::new(loaded_model.unwrap());
            println!("> Testing with the loaded model");
            demo_model(loaded_agent);
        }
        _ => {
            eprintln!("unknown algo {algo}");
        }
    }
}