wip burn-rl dqn example
parent b98a135749
commit 354dcfd341

@@ -5,13 +5,17 @@ edition = "2021"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
+[[bin]]
+name = "train_dqn_burn"
+path = "src/burnrl/main.rs"
+
 [[bin]]
 name = "train_dqn"
 path = "src/bin/train_dqn.rs"
 
-[[bin]]
-name = "train_burn_rl"
-path = "src/bin/train_burn_rl.rs"
+# [[bin]]
+# name = "train_burn_rl"
+# path = "src/bin/train_burn_rl.rs"
 
 [[bin]]
 name = "train_dqn_full"

@@ -1,4 +1,4 @@
-use bot::strategy::burn_environment::{TrictracAction, TrictracEnvironment};
+use bot::burnrl::environment::{TrictracAction, TrictracEnvironment};
 use bot::strategy::dqn_common::get_valid_actions;
 use burn_rl::base::Environment;
 use rand::Rng;

@@ -224,4 +224,3 @@ fn print_help() {
     println!(" - For now, only a simple epsilon-greedy policy is implemented");
     println!(" - Integration with a real DQN agent can be added later");
 }
-

@@ -1,5 +1,5 @@
+use bot::burnrl::environment::{TrictracAction, TrictracEnvironment};
 use bot::strategy::burn_dqn_agent::{BurnDqnAgent, DqnConfig, Experience};
-use bot::strategy::burn_environment::{TrictracAction, TrictracEnvironment};
 use bot::strategy::dqn_common::get_valid_actions;
 use burn::optim::AdamConfig;
 use burn_rl::base::Environment;

@@ -130,10 +130,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
         let valid_indices: Vec<usize> = (0..valid_actions.len()).collect();
 
         // Select an action with the DQN agent
-        let action_index = agent.select_action(
-            &current_state_data,
-            &valid_indices,
-        );
+        let action_index = agent.select_action(&current_state_data, &valid_indices);
         let action = TrictracAction {
             index: action_index as u32,
         };

bot/src/burnrl/dqn_model.rs (new file, 142 lines)
@@ -0,0 +1,142 @@
+use crate::burnrl::utils::soft_update_linear;
+use burn::module::Module;
+use burn::nn::{Linear, LinearConfig};
+use burn::optim::AdamWConfig;
+use burn::tensor::activation::relu;
+use burn::tensor::backend::{AutodiffBackend, Backend};
+use burn::tensor::Tensor;
+use burn_rl::agent::DQN;
+use burn_rl::agent::{DQNModel, DQNTrainingConfig};
+use burn_rl::base::{Action, Agent, ElemType, Environment, Memory, Model, State};
+
+#[derive(Module, Debug)]
+pub struct Net<B: Backend> {
+    linear_0: Linear<B>,
+    linear_1: Linear<B>,
+    linear_2: Linear<B>,
+}
+
+impl<B: Backend> Net<B> {
+    #[allow(unused)]
+    pub fn new(input_size: usize, dense_size: usize, output_size: usize) -> Self {
+        Self {
+            linear_0: LinearConfig::new(input_size, dense_size).init(&Default::default()),
+            linear_1: LinearConfig::new(dense_size, dense_size).init(&Default::default()),
+            linear_2: LinearConfig::new(dense_size, output_size).init(&Default::default()),
+        }
+    }
+
+    fn consume(self) -> (Linear<B>, Linear<B>, Linear<B>) {
+        (self.linear_0, self.linear_1, self.linear_2)
+    }
+}
+
+impl<B: Backend> Model<B, Tensor<B, 2>, Tensor<B, 2>> for Net<B> {
+    fn forward(&self, input: Tensor<B, 2>) -> Tensor<B, 2> {
+        let layer_0_output = relu(self.linear_0.forward(input));
+        let layer_1_output = relu(self.linear_1.forward(layer_0_output));
+
+        relu(self.linear_2.forward(layer_1_output))
+    }
+
+    fn infer(&self, input: Tensor<B, 2>) -> Tensor<B, 2> {
+        self.forward(input)
+    }
+}
+
+impl<B: Backend> DQNModel<B> for Net<B> {
+    fn soft_update(this: Self, that: &Self, tau: ElemType) -> Self {
+        let (linear_0, linear_1, linear_2) = this.consume();
+
+        Self {
+            linear_0: soft_update_linear(linear_0, &that.linear_0, tau),
+            linear_1: soft_update_linear(linear_1, &that.linear_1, tau),
+            linear_2: soft_update_linear(linear_2, &that.linear_2, tau),
+        }
+    }
+}
+
+#[allow(unused)]
+const MEMORY_SIZE: usize = 4096;
+const DENSE_SIZE: usize = 128;
+const EPS_DECAY: f64 = 1000.0;
+const EPS_START: f64 = 0.9;
+const EPS_END: f64 = 0.05;
+
+type MyAgent<E, B> = DQN<E, B, Net<B>>;
+
+#[allow(unused)]
+pub fn run<E: Environment, B: AutodiffBackend>(
+    num_episodes: usize,
+    visualized: bool,
+) -> impl Agent<E> {
+    let mut env = E::new(visualized);
+
+    let model = Net::<B>::new(
+        <<E as Environment>::StateType as State>::size(),
+        DENSE_SIZE,
+        <<E as Environment>::ActionType as Action>::size(),
+    );
+
+    let mut agent = MyAgent::new(model);
+
+    let config = DQNTrainingConfig::default();
+
+    let mut memory = Memory::<E, B, MEMORY_SIZE>::default();
+
+    let mut optimizer = AdamWConfig::new()
+        .with_grad_clipping(config.clip_grad.clone())
+        .init();
+
+    let mut policy_net = agent.model().as_ref().unwrap().clone();
+
+    let mut step = 0_usize;
+
+    for episode in 0..num_episodes {
+        let mut episode_done = false;
+        let mut episode_reward: ElemType = 0.0;
+        let mut episode_duration = 0_usize;
+        let mut state = env.state();
+
+        while !episode_done {
+            let eps_threshold =
+                EPS_END + (EPS_START - EPS_END) * f64::exp(-(step as f64) / EPS_DECAY);
+            let action =
+                DQN::<E, B, Net<B>>::react_with_exploration(&policy_net, state, eps_threshold);
+            let snapshot = env.step(action);
+
+            episode_reward +=
+                <<E as Environment>::RewardType as Into<ElemType>>::into(snapshot.reward().clone());
+
+            memory.push(
+                state,
+                *snapshot.state(),
+                action,
+                snapshot.reward().clone(),
+                snapshot.done(),
+            );
+
+            if config.batch_size < memory.len() {
+                policy_net =
+                    agent.train::<MEMORY_SIZE>(policy_net, &memory, &mut optimizer, &config);
+            }
+
+            step += 1;
+            episode_duration += 1;
+
+            if snapshot.done() || episode_duration >= E::MAX_STEPS {
+                env.reset();
+                episode_done = true;
+
+                println!(
+                    "{{\"episode\": {}, \"reward\": {:.4}, \"duration\": {}}}",
+                    episode, episode_reward, episode_duration
+                );
+            } else {
+                state = *snapshot.state();
+            }
+        }
+    }
+
+    agent.valid()
+}

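Note on the training loop above: exploration decays exponentially with the global step count, eps = EPS_END + (EPS_START - EPS_END) * exp(-step / EPS_DECAY). The standalone sketch below (not part of the commit; it just mirrors the constants in dqn_model.rs) shows roughly how fast the threshold falls. Because it is driven by the global step rather than the episode index, long episodes use up the exploration budget faster.

// Standalone sketch (not part of the commit): the epsilon-greedy schedule
// used in run(), with the same constants as dqn_model.rs.
fn eps_threshold(step: usize) -> f64 {
    const EPS_START: f64 = 0.9;
    const EPS_END: f64 = 0.05;
    const EPS_DECAY: f64 = 1000.0;
    EPS_END + (EPS_START - EPS_END) * f64::exp(-(step as f64) / EPS_DECAY)
}

fn main() {
    // Approximate values: step 0 -> 0.900, 1000 -> 0.363, 3000 -> 0.092, 10000 -> 0.050.
    for step in [0usize, 1000, 3000, 10000] {
        println!("step {step}: eps = {:.3}", eps_threshold(step));
    }
}
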
@@ -1,3 +1,4 @@
+use crate::strategy::dqn_common;
 use burn::{prelude::Backend, tensor::Tensor};
 use burn_rl::base::{Action, Environment, Snapshot, State};
 use rand::{thread_rng, Rng};

@@ -57,9 +58,7 @@ impl Action for TrictracAction {
     }
 
     fn size() -> usize {
-        // Use the compact action space to reduce complexity
-        // Estimated maximum based on contextual actions
-        1000 // Conservative estimate, will be adjusted dynamically
+        1252
     }
 }
 

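Note on the size() change above: Action::size() is what dqn_model.rs passes as output_size when building Net (via <<E as Environment>::ActionType as Action>::size()), so the Q-network now has a fixed 1252-wide output layer instead of the earlier 1000 estimate. Presumably 1252 is the exact size of the encoded Trictrac action space, though the commit does not say so explicitly.
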
@@ -205,8 +204,8 @@ impl TrictracEnvironment {
         &self,
         action: TrictracAction,
         game_state: &GameState,
-    ) -> Option<super::dqn_common::TrictracAction> {
-        use super::dqn_common::get_valid_actions;
+    ) -> Option<dqn_common::TrictracAction> {
+        use dqn_common::get_valid_actions;
 
         // Get the valid actions in the current context
         let valid_actions = get_valid_actions(game_state);

@@ -223,9 +222,9 @@ impl TrictracEnvironment {
     /// Executes a Trictrac action in the game
     fn execute_action(
         &mut self,
-        action: super::dqn_common::TrictracAction,
+        action: dqn_common::TrictracAction,
     ) -> Result<f32, Box<dyn std::error::Error>> {
-        use super::dqn_common::TrictracAction;
+        use dqn_common::TrictracAction;
 
         let mut reward = 0.0;
 

@@ -320,7 +319,7 @@ impl TrictracEnvironment {
         // If it's the opponent's turn, play automatically
         if self.game.active_player_id == self.opponent_id && self.game.stage != Stage::Ended {
            // Use the default strategy for the opponent
-            use super::default::DefaultStrategy;
+            use crate::strategy::default::DefaultStrategy;
             use crate::BotStrategy;
 
             let mut default_strategy = DefaultStrategy::default();

bot/src/burnrl/main.rs (new file, 16 lines)
@@ -0,0 +1,16 @@
+use burn::backend::{Autodiff, NdArray};
+use burn_rl::base::ElemType;
+use bot::burnrl::{
+    dqn_model,
+    environment,
+    utils::demo_model,
+};
+
+type Backend = Autodiff<NdArray<ElemType>>;
+type Env = environment::TrictracEnvironment;
+
+fn main() {
+    let agent = dqn_model::run::<Env, Backend>(512, false); //true);
+
+    demo_model::<Env>(agent);
+}

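As wired above, the binary trains for 512 episodes with visualization disabled, on the CPU NdArray backend wrapped in Autodiff, printing one JSON line per episode, and then replays the trained agent once through demo_model on a visualized environment. A release build (for example "cargo run --release --bin train_dqn_burn", assuming the [[bin]] entry added in Cargo.toml above) is advisable, since NdArray in a debug build will be slow.
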
bot/src/burnrl/mod.rs (new file, 3 lines)
@@ -0,0 +1,3 @@
+pub mod dqn_model;
+pub mod environment;
+pub mod utils;

bot/src/burnrl/utils.rs (new file, 44 lines)
@@ -0,0 +1,44 @@
+use burn::module::{Param, ParamId};
+use burn::nn::Linear;
+use burn::tensor::backend::Backend;
+use burn::tensor::Tensor;
+use burn_rl::base::{Agent, ElemType, Environment};
+
+pub fn demo_model<E: Environment>(agent: impl Agent<E>) {
+    let mut env = E::new(true);
+    let mut state = env.state();
+    let mut done = false;
+    while !done {
+        if let Some(action) = agent.react(&state) {
+            let snapshot = env.step(action);
+            state = *snapshot.state();
+            done = snapshot.done();
+        }
+    }
+}
+
+fn soft_update_tensor<const N: usize, B: Backend>(
+    this: &Param<Tensor<B, N>>,
+    that: &Param<Tensor<B, N>>,
+    tau: ElemType,
+) -> Param<Tensor<B, N>> {
+    let that_weight = that.val();
+    let this_weight = this.val();
+    let new_weight = this_weight * (1.0 - tau) + that_weight * tau;
+
+    Param::initialized(ParamId::new(), new_weight)
+}
+
+pub fn soft_update_linear<B: Backend>(
+    this: Linear<B>,
+    that: &Linear<B>,
+    tau: ElemType,
+) -> Linear<B> {
+    let weight = soft_update_tensor(&this.weight, &that.weight, tau);
+    let bias = match (&this.bias, &that.bias) {
+        (Some(this_bias), Some(that_bias)) => Some(soft_update_tensor(this_bias, that_bias, tau)),
+        _ => None,
+    };
+
+    Linear::<B> { weight, bias }
+}

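The soft-update helpers above implement the usual Polyak averaging: each parameter of "this" is blended toward the corresponding parameter of "that" by a factor tau, new = this * (1 - tau) + that * tau. A standalone scalar sketch (not part of the commit; 0.005 is only an illustrative tau, the real value is whatever burn-rl's DQN training passes in) makes the blending explicit:

// Standalone sketch (not part of the commit): the blending rule that
// soft_update_tensor applies element-wise to every parameter tensor.
fn soft_update_scalar(this: f64, that: f64, tau: f64) -> f64 {
    this * (1.0 - tau) + that * tau
}

fn main() {
    let (this, that, tau) = (0.20_f64, 1.00, 0.005);
    // Moves only 0.5% of the way toward "that": prints 0.2040.
    println!("{:.4}", soft_update_scalar(this, that, tau));
}
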
@@ -1,7 +1,8 @@
+pub mod burnrl;
 pub mod strategy;
 
 use store::{CheckerMove, Color, GameEvent, GameState, PlayerId, PointsRules, Stage, TurnStage};
-pub use strategy::burn_dqn_strategy::{BurnDqnStrategy, create_burn_dqn_strategy};
+pub use strategy::burn_dqn_strategy::{create_burn_dqn_strategy, BurnDqnStrategy};
 pub use strategy::default::DefaultStrategy;
 pub use strategy::dqn::DqnStrategy;
 pub use strategy::erroneous_moves::ErroneousStrategy;

@@ -1,6 +1,5 @@
 pub mod burn_dqn_agent;
 pub mod burn_dqn_strategy;
-pub mod burn_environment;
 pub mod client;
 pub mod default;
 pub mod dqn;