refact: remove server & bevy client; remove _big bot algs

This commit is contained in:
Henri Bourcereau 2026-01-04 12:43:21 +01:00
parent 1e773671d9
commit 74f692d7ba
38 changed files with 24 additions and 3281 deletions

View file

@@ -1,194 +0,0 @@
use crate::burnrl::environment_big::TrictracEnvironment;
use crate::burnrl::utils::{soft_update_linear, Config};
use burn::backend::{ndarray::NdArrayDevice, NdArray};
use burn::module::Module;
use burn::nn::{Linear, LinearConfig};
use burn::optim::AdamWConfig;
use burn::record::{CompactRecorder, Recorder};
use burn::tensor::activation::relu;
use burn::tensor::backend::{AutodiffBackend, Backend};
use burn::tensor::Tensor;
use burn_rl::agent::DQN;
use burn_rl::agent::{DQNModel, DQNTrainingConfig};
use burn_rl::base::{Action, Agent, ElemType, Environment, Memory, Model, State};
use std::time::SystemTime;
#[derive(Module, Debug)]
pub struct Net<B: Backend> {
linear_0: Linear<B>,
linear_1: Linear<B>,
linear_2: Linear<B>,
}
impl<B: Backend> Net<B> {
#[allow(unused)]
pub fn new(input_size: usize, dense_size: usize, output_size: usize) -> Self {
Self {
linear_0: LinearConfig::new(input_size, dense_size).init(&Default::default()),
linear_1: LinearConfig::new(dense_size, dense_size).init(&Default::default()),
linear_2: LinearConfig::new(dense_size, output_size).init(&Default::default()),
}
}
fn consume(self) -> (Linear<B>, Linear<B>, Linear<B>) {
(self.linear_0, self.linear_1, self.linear_2)
}
}
impl<B: Backend> Model<B, Tensor<B, 2>, Tensor<B, 2>> for Net<B> {
fn forward(&self, input: Tensor<B, 2>) -> Tensor<B, 2> {
let layer_0_output = relu(self.linear_0.forward(input));
let layer_1_output = relu(self.linear_1.forward(layer_0_output));
relu(self.linear_2.forward(layer_1_output))
}
fn infer(&self, input: Tensor<B, 2>) -> Tensor<B, 2> {
self.forward(input)
}
}
impl<B: Backend> DQNModel<B> for Net<B> {
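// Soft (Polyak) update: blend the parameters of the two networks, weighted by tau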
fn soft_update(this: Self, that: &Self, tau: ElemType) -> Self {
let (linear_0, linear_1, linear_2) = this.consume();
Self {
linear_0: soft_update_linear(linear_0, &that.linear_0, tau),
linear_1: soft_update_linear(linear_1, &that.linear_1, tau),
linear_2: soft_update_linear(linear_2, &that.linear_2, tau),
}
}
}
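// Replay buffer capacity, in stored transitions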
#[allow(unused)]
const MEMORY_SIZE: usize = 8192;
type MyAgent<E, B> = DQN<E, B, Net<B>>;
#[allow(unused)]
// pub fn run<E: Environment + AsMut<TrictracEnvironment>, B: AutodiffBackend>(
pub fn run<
E: Environment + AsMut<TrictracEnvironment>,
B: AutodiffBackend<InnerBackend = NdArray>,
>(
conf: &Config,
visualized: bool,
// ) -> DQN<E, B, Net<B>> {
) -> impl Agent<E> {
let mut env = E::new(visualized);
env.as_mut().max_steps = conf.max_steps;
let model = Net::<B>::new(
<<E as Environment>::StateType as State>::size(),
conf.dense_size,
<<E as Environment>::ActionType as Action>::size(),
);
let mut agent = MyAgent::new(model);
// let config = DQNTrainingConfig::default();
let config = DQNTrainingConfig {
gamma: conf.gamma,
tau: conf.tau,
learning_rate: conf.learning_rate,
batch_size: conf.batch_size,
clip_grad: Some(burn::grad_clipping::GradientClippingConfig::Value(
conf.clip_grad,
)),
};
let mut memory = Memory::<E, B, MEMORY_SIZE>::default();
let mut optimizer = AdamWConfig::new()
.with_grad_clipping(config.clip_grad.clone())
.init();
let mut policy_net = agent.model().as_ref().unwrap().clone();
let mut step = 0_usize;
for episode in 0..conf.num_episodes {
let mut episode_done = false;
let mut episode_reward: ElemType = 0.0;
let mut episode_duration = 0_usize;
let mut state = env.state();
let mut now = SystemTime::now();
while !episode_done {
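// Epsilon-greedy exploration: the threshold decays exponentially from eps_start towards eps_end as steps accumulate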
let eps_threshold = conf.eps_end
+ (conf.eps_start - conf.eps_end) * f64::exp(-(step as f64) / conf.eps_decay);
let action =
DQN::<E, B, Net<B>>::react_with_exploration(&policy_net, state, eps_threshold);
let snapshot = env.step(action);
episode_reward +=
<<E as Environment>::RewardType as Into<ElemType>>::into(snapshot.reward().clone());
memory.push(
state,
*snapshot.state(),
action,
snapshot.reward().clone(),
snapshot.done(),
);
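// Only start optimizing once the replay memory holds more than one batch of transitions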
if config.batch_size < memory.len() {
policy_net =
agent.train::<MEMORY_SIZE>(policy_net, &memory, &mut optimizer, &config);
}
step += 1;
episode_duration += 1;
if snapshot.done() || episode_duration >= conf.max_steps {
let envmut = env.as_mut();
let goodmoves_ratio = ((envmut.goodmoves_count as f32 / episode_duration as f32)
* 100.0)
.round() as u32;
println!(
"{{\"episode\": {episode}, \"reward\": {episode_reward:.4}, \"steps count\": {episode_duration}, \"epsilon\": {eps_threshold:.3}, \"goodmoves\": {}, \"ratio\": {}%, \"rollpoints\":{}, \"duration\": {}}}",
envmut.goodmoves_count,
goodmoves_ratio,
envmut.pointrolls_count,
now.elapsed().unwrap().as_secs(),
);
env.reset();
episode_done = true;
now = SystemTime::now();
} else {
state = *snapshot.state();
}
}
}
let valid_agent = agent.valid();
if let Some(path) = &conf.save_path {
save_model(valid_agent.model().as_ref().unwrap(), path);
}
valid_agent
}
pub fn save_model(model: &Net<NdArray<ElemType>>, path: &String) {
let recorder = CompactRecorder::new();
let model_path = format!("{path}.mpk");
println!("info: Modèle de validation sauvegardé : {model_path}");
recorder
.record(model.clone().into_record(), model_path.into())
.unwrap();
}
pub fn load_model(dense_size: usize, path: &String) -> Option<Net<NdArray<ElemType>>> {
let model_path = format!("{path}.mpk");
// println!("Chargement du modèle depuis : {model_path}");
CompactRecorder::new()
.load(model_path.into(), &NdArrayDevice::default())
.map(|record| {
Net::new(
<TrictracEnvironment as Environment>::StateType::size(),
dense_size,
<TrictracEnvironment as Environment>::ActionType::size(),
)
.load_record(record)
})
.ok()
}

View file

@@ -1,9 +1,6 @@
pub mod dqn;
pub mod dqn_big;
pub mod dqn_valid;
pub mod ppo;
pub mod ppo_big;
pub mod ppo_valid;
pub mod sac;
pub mod sac_big;
pub mod sac_valid;

View file

@@ -1,191 +0,0 @@
use crate::burnrl::environment_big::TrictracEnvironment;
use crate::burnrl::utils::Config;
use burn::backend::{ndarray::NdArrayDevice, NdArray};
use burn::module::Module;
use burn::nn::{Initializer, Linear, LinearConfig};
use burn::optim::AdamWConfig;
use burn::record::{CompactRecorder, Recorder};
use burn::tensor::activation::{relu, softmax};
use burn::tensor::backend::{AutodiffBackend, Backend};
use burn::tensor::Tensor;
use burn_rl::agent::{PPOModel, PPOOutput, PPOTrainingConfig, PPO};
use burn_rl::base::{Action, Agent, ElemType, Environment, Memory, Model, State};
use std::env;
use std::fs;
use std::time::SystemTime;
#[derive(Module, Debug)]
pub struct Net<B: Backend> {
linear: Linear<B>,
linear_actor: Linear<B>,
linear_critic: Linear<B>,
}
impl<B: Backend> Net<B> {
#[allow(unused)]
pub fn new(input_size: usize, dense_size: usize, output_size: usize) -> Self {
let initializer = Initializer::XavierUniform { gain: 1.0 };
Self {
linear: LinearConfig::new(input_size, dense_size)
.with_initializer(initializer.clone())
.init(&Default::default()),
linear_actor: LinearConfig::new(dense_size, output_size)
.with_initializer(initializer.clone())
.init(&Default::default()),
linear_critic: LinearConfig::new(dense_size, 1)
.with_initializer(initializer)
.init(&Default::default()),
}
}
}
impl<B: Backend> Model<B, Tensor<B, 2>, PPOOutput<B>, Tensor<B, 2>> for Net<B> {
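// Shared dense layer feeding both the actor head (softmax policy) and the critic head (state value)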
fn forward(&self, input: Tensor<B, 2>) -> PPOOutput<B> {
let layer_0_output = relu(self.linear.forward(input));
let policies = softmax(self.linear_actor.forward(layer_0_output.clone()), 1);
let values = self.linear_critic.forward(layer_0_output);
PPOOutput::<B>::new(policies, values)
}
fn infer(&self, input: Tensor<B, 2>) -> Tensor<B, 2> {
let layer_0_output = relu(self.linear.forward(input));
softmax(self.linear_actor.forward(layer_0_output.clone()), 1)
}
}
impl<B: Backend> PPOModel<B> for Net<B> {}
#[allow(unused)]
const MEMORY_SIZE: usize = 512;
type MyAgent<E, B> = PPO<E, B, Net<B>>;
#[allow(unused)]
pub fn run<
E: Environment + AsMut<TrictracEnvironment>,
B: AutodiffBackend<InnerBackend = NdArray>,
>(
conf: &Config,
visualized: bool,
// ) -> PPO<E, B, Net<B>> {
) -> impl Agent<E> {
let mut env = E::new(visualized);
env.as_mut().max_steps = conf.max_steps;
let mut model = Net::<B>::new(
<<E as Environment>::StateType as State>::size(),
conf.dense_size,
<<E as Environment>::ActionType as Action>::size(),
);
let agent = MyAgent::default();
let config = PPOTrainingConfig {
gamma: conf.gamma,
lambda: conf.lambda,
epsilon_clip: conf.epsilon_clip,
critic_weight: conf.critic_weight,
entropy_weight: conf.entropy_weight,
learning_rate: conf.learning_rate,
epochs: conf.epochs,
batch_size: conf.batch_size,
clip_grad: Some(burn::grad_clipping::GradientClippingConfig::Value(
conf.clip_grad,
)),
};
let mut optimizer = AdamWConfig::new()
.with_grad_clipping(config.clip_grad.clone())
.init();
let mut memory = Memory::<E, B, MEMORY_SIZE>::default();
for episode in 0..conf.num_episodes {
let mut episode_done = false;
let mut episode_reward = 0.0;
let mut episode_duration = 0_usize;
let mut now = SystemTime::now();
env.reset();
while !episode_done {
let state = env.state();
if let Some(action) = MyAgent::<E, _>::react_with_model(&state, &model) {
let snapshot = env.step(action);
episode_reward += <<E as Environment>::RewardType as Into<ElemType>>::into(
snapshot.reward().clone(),
);
memory.push(
state,
*snapshot.state(),
action,
snapshot.reward().clone(),
snapshot.done(),
);
episode_duration += 1;
episode_done = snapshot.done() || episode_duration >= conf.max_steps;
}
}
println!(
"{{\"episode\": {episode}, \"reward\": {episode_reward:.4}, \"steps count\": {episode_duration}, \"duration\": {}}}",
now.elapsed().unwrap().as_secs(),
);
now = SystemTime::now();
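// On-policy update: train on the transitions collected during this episode, then discard them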
model = MyAgent::train::<MEMORY_SIZE>(model, &memory, &mut optimizer, &config);
memory.clear();
}
if let Some(path) = &conf.save_path {
let device = NdArrayDevice::default();
let recorder = CompactRecorder::new();
let tmp_path = env::temp_dir().join("tmp_model.mpk");
// Save the trained model (backend B) to a temporary file
recorder
.record(model.clone().into_record(), tmp_path.clone())
.expect("Failed to save temporary model");
// Create a new model instance with the target backend (NdArray)
let model_to_save: Net<NdArray<ElemType>> = Net::new(
<<E as Environment>::StateType as State>::size(),
conf.dense_size,
<<E as Environment>::ActionType as Action>::size(),
);
// Load the record from the temporary file into the new model
let record = recorder
.load(tmp_path.clone(), &device)
.expect("Failed to load temporary model");
let model_with_loaded_weights = model_to_save.load_record(record);
// Clean up the temporary file
fs::remove_file(tmp_path).expect("Failed to remove temporary model file");
save_model(&model_with_loaded_weights, path);
}
agent.valid(model)
}
pub fn save_model(model: &Net<NdArray<ElemType>>, path: &String) {
let recorder = CompactRecorder::new();
let model_path = format!("{path}.mpk");
println!("info: Modèle de validation sauvegardé : {model_path}");
recorder
.record(model.clone().into_record(), model_path.into())
.unwrap();
}
pub fn load_model(dense_size: usize, path: &String) -> Option<Net<NdArray<ElemType>>> {
let model_path = format!("{path}.mpk");
// println!("Chargement du modèle depuis : {model_path}");
CompactRecorder::new()
.load(model_path.into(), &NdArrayDevice::default())
.map(|record| {
Net::new(
<TrictracEnvironment as Environment>::StateType::size(),
dense_size,
<TrictracEnvironment as Environment>::ActionType::size(),
)
.load_record(record)
})
.ok()
}

View file

@@ -1,222 +0,0 @@
use crate::burnrl::environment_big::TrictracEnvironment;
use crate::burnrl::utils::{soft_update_linear, Config};
use burn::backend::{ndarray::NdArrayDevice, NdArray};
use burn::module::Module;
use burn::nn::{Linear, LinearConfig};
use burn::optim::AdamWConfig;
use burn::record::{CompactRecorder, Recorder};
use burn::tensor::activation::{relu, softmax};
use burn::tensor::backend::{AutodiffBackend, Backend};
use burn::tensor::Tensor;
use burn_rl::agent::{SACActor, SACCritic, SACNets, SACOptimizer, SACTrainingConfig, SAC};
use burn_rl::base::{Action, Agent, ElemType, Environment, Memory, Model, State};
use std::time::SystemTime;
#[derive(Module, Debug)]
pub struct Actor<B: Backend> {
linear_0: Linear<B>,
linear_1: Linear<B>,
linear_2: Linear<B>,
}
impl<B: Backend> Actor<B> {
pub fn new(input_size: usize, dense_size: usize, output_size: usize) -> Self {
Self {
linear_0: LinearConfig::new(input_size, dense_size).init(&Default::default()),
linear_1: LinearConfig::new(dense_size, dense_size).init(&Default::default()),
linear_2: LinearConfig::new(dense_size, output_size).init(&Default::default()),
}
}
}
impl<B: Backend> Model<B, Tensor<B, 2>, Tensor<B, 2>> for Actor<B> {
fn forward(&self, input: Tensor<B, 2>) -> Tensor<B, 2> {
let layer_0_output = relu(self.linear_0.forward(input));
let layer_1_output = relu(self.linear_1.forward(layer_0_output));
softmax(self.linear_2.forward(layer_1_output), 1)
}
fn infer(&self, input: Tensor<B, 2>) -> Tensor<B, 2> {
self.forward(input)
}
}
impl<B: Backend> SACActor<B> for Actor<B> {}
#[derive(Module, Debug)]
pub struct Critic<B: Backend> {
linear_0: Linear<B>,
linear_1: Linear<B>,
linear_2: Linear<B>,
}
impl<B: Backend> Critic<B> {
pub fn new(input_size: usize, dense_size: usize, output_size: usize) -> Self {
Self {
linear_0: LinearConfig::new(input_size, dense_size).init(&Default::default()),
linear_1: LinearConfig::new(dense_size, dense_size).init(&Default::default()),
linear_2: LinearConfig::new(dense_size, output_size).init(&Default::default()),
}
}
fn consume(self) -> (Linear<B>, Linear<B>, Linear<B>) {
(self.linear_0, self.linear_1, self.linear_2)
}
}
impl<B: Backend> Model<B, Tensor<B, 2>, Tensor<B, 2>> for Critic<B> {
fn forward(&self, input: Tensor<B, 2>) -> Tensor<B, 2> {
let layer_0_output = relu(self.linear_0.forward(input));
let layer_1_output = relu(self.linear_1.forward(layer_0_output));
self.linear_2.forward(layer_1_output)
}
fn infer(&self, input: Tensor<B, 2>) -> Tensor<B, 2> {
self.forward(input)
}
}
impl<B: Backend> SACCritic<B> for Critic<B> {
fn soft_update(this: Self, that: &Self, tau: ElemType) -> Self {
let (linear_0, linear_1, linear_2) = this.consume();
Self {
linear_0: soft_update_linear(linear_0, &that.linear_0, tau),
linear_1: soft_update_linear(linear_1, &that.linear_1, tau),
linear_2: soft_update_linear(linear_2, &that.linear_2, tau),
}
}
}
#[allow(unused)]
const MEMORY_SIZE: usize = 4096;
type MyAgent<E, B> = SAC<E, B, Actor<B>>;
#[allow(unused)]
pub fn run<
E: Environment + AsMut<TrictracEnvironment>,
B: AutodiffBackend<InnerBackend = NdArray>,
>(
conf: &Config,
visualized: bool,
) -> impl Agent<E> {
let mut env = E::new(visualized);
env.as_mut().max_steps = conf.max_steps;
let state_dim = <<E as Environment>::StateType as State>::size();
let action_dim = <<E as Environment>::ActionType as Action>::size();
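// One actor network and two critic networks (twin Q-functions), as is standard for SAC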
let actor = Actor::<B>::new(state_dim, conf.dense_size, action_dim);
let critic_1 = Critic::<B>::new(state_dim, conf.dense_size, action_dim);
let critic_2 = Critic::<B>::new(state_dim, conf.dense_size, action_dim);
let mut nets = SACNets::<B, Actor<B>, Critic<B>>::new(actor, critic_1, critic_2);
let mut agent = MyAgent::default();
let config = SACTrainingConfig {
gamma: conf.gamma,
tau: conf.tau,
learning_rate: conf.learning_rate,
min_probability: conf.min_probability,
batch_size: conf.batch_size,
clip_grad: Some(burn::grad_clipping::GradientClippingConfig::Value(
conf.clip_grad,
)),
};
let mut memory = Memory::<E, B, MEMORY_SIZE>::default();
let optimizer_config = AdamWConfig::new().with_grad_clipping(config.clip_grad.clone());
let mut optimizer = SACOptimizer::new(
optimizer_config.clone().init(),
optimizer_config.clone().init(),
optimizer_config.clone().init(),
optimizer_config.init(),
);
let mut step = 0_usize;
for episode in 0..conf.num_episodes {
let mut episode_done = false;
let mut episode_reward = 0.0;
let mut episode_duration = 0_usize;
let mut state = env.state();
let mut now = SystemTime::now();
while !episode_done {
if let Some(action) = MyAgent::<E, _>::react_with_model(&state, &nets.actor) {
let snapshot = env.step(action);
episode_reward += <<E as Environment>::RewardType as Into<ElemType>>::into(
snapshot.reward().clone(),
);
memory.push(
state,
*snapshot.state(),
action,
snapshot.reward().clone(),
snapshot.done(),
);
if config.batch_size < memory.len() {
nets = agent.train::<MEMORY_SIZE, _>(nets, &memory, &mut optimizer, &config);
}
step += 1;
episode_duration += 1;
if snapshot.done() || episode_duration >= conf.max_steps {
env.reset();
episode_done = true;
println!(
"{{\"episode\": {episode}, \"reward\": {episode_reward:.4}, \"steps count\": {episode_duration}, \"duration\": {}}}",
now.elapsed().unwrap().as_secs()
);
now = SystemTime::now();
} else {
state = *snapshot.state();
}
}
}
}
let valid_agent = agent.valid(nets.actor);
if let Some(path) = &conf.save_path {
if let Some(model) = valid_agent.model() {
save_model(model, path);
}
}
valid_agent
}
pub fn save_model(model: &Actor<NdArray<ElemType>>, path: &String) {
let recorder = CompactRecorder::new();
let model_path = format!("{path}.mpk");
println!("info: Modèle de validation sauvegardé : {model_path}");
recorder
.record(model.clone().into_record(), model_path.into())
.unwrap();
}
pub fn load_model(dense_size: usize, path: &String) -> Option<Actor<NdArray<ElemType>>> {
let model_path = format!("{path}.mpk");
// println!("Chargement du modèle depuis : {model_path}");
CompactRecorder::new()
.load(model_path.into(), &NdArrayDevice::default())
.map(|record| {
Actor::new(
<TrictracEnvironment as Environment>::StateType::size(),
dense_size,
<TrictracEnvironment as Environment>::ActionType::size(),
)
.load_record(record)
})
.ok()
}

View file

@@ -1,470 +0,0 @@
use crate::training_common_big;
use burn::{prelude::Backend, tensor::Tensor};
use burn_rl::base::{Action, Environment, Snapshot, State};
use rand::{thread_rng, Rng};
use store::{GameEvent, GameState, PlayerId, PointsRules, Stage, TurnStage};
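// Reward constants; ERROR_REWARD uses a distinctive fractional value so invalid moves remain recognizable in the statistics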
const ERROR_REWARD: f32 = -1.00012121;
const REWARD_VALID_MOVE: f32 = 1.00012121;
const REWARD_RATIO: f32 = 0.1;
const WIN_POINTS: f32 = 100.0;
/// Trictrac game state for burn-rl
#[derive(Debug, Clone, Copy)]
pub struct TrictracState {
pub data: [i8; 36], // Vector representation of the game state
}
impl State for TrictracState {
type Data = [i8; 36];
fn to_tensor<B: Backend>(&self) -> Tensor<B, 1> {
Tensor::from_floats(self.data, &B::Device::default())
}
fn size() -> usize {
36
}
}
impl TrictracState {
/// Converts a GameState into a TrictracState
pub fn from_game_state(game_state: &GameState) -> Self {
let state_vec = game_state.to_vec();
let mut data = [0; 36];
// Copy the data, making sure not to exceed the array size
let copy_len = state_vec.len().min(36);
data[..copy_len].copy_from_slice(&state_vec[..copy_len]);
TrictracState { data }
}
}
/// Possible Trictrac actions for burn-rl
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct TrictracAction {
// u32 as required by burn_rl::base::Action type
pub index: u32,
}
impl Action for TrictracAction {
fn random() -> Self {
use rand::{thread_rng, Rng};
let mut rng = thread_rng();
TrictracAction {
index: rng.gen_range(0..Self::size() as u32),
}
}
fn enumerate() -> Vec<Self> {
(0..Self::size() as u32)
.map(|index| TrictracAction { index })
.collect()
}
fn size() -> usize {
1252
}
}
impl From<u32> for TrictracAction {
fn from(index: u32) -> Self {
TrictracAction { index }
}
}
impl From<TrictracAction> for u32 {
fn from(action: TrictracAction) -> u32 {
action.index
}
}
/// Trictrac environment for burn-rl
#[derive(Debug)]
pub struct TrictracEnvironment {
pub game: GameState,
active_player_id: PlayerId,
opponent_id: PlayerId,
current_state: TrictracState,
episode_reward: f32,
pub step_count: usize,
pub max_steps: usize,
pub pointrolls_count: usize,
pub goodmoves_count: usize,
pub goodmoves_ratio: f32,
pub visualized: bool,
}
impl Environment for TrictracEnvironment {
type StateType = TrictracState;
type ActionType = TrictracAction;
type RewardType = f32;
fn new(visualized: bool) -> Self {
let mut game = GameState::new(false);
// Add the two players
game.init_player("DQN Agent");
game.init_player("Opponent");
let player1_id = 1;
let player2_id = 2;
// Start the game
game.consume(&GameEvent::BeginGame { goes_first: 1 });
let current_state = TrictracState::from_game_state(&game);
TrictracEnvironment {
game,
active_player_id: player1_id,
opponent_id: player2_id,
current_state,
episode_reward: 0.0,
step_count: 0,
max_steps: 2000,
pointrolls_count: 0,
goodmoves_count: 0,
goodmoves_ratio: 0.0,
visualized,
}
}
fn state(&self) -> Self::StateType {
self.current_state
}
fn reset(&mut self) -> Snapshot<Self> {
// Reset the game
self.game = GameState::new(false);
self.game.init_player("DQN Agent");
self.game.init_player("Opponent");
// Start the game
self.game.consume(&GameEvent::BeginGame { goes_first: 1 });
self.current_state = TrictracState::from_game_state(&self.game);
self.episode_reward = 0.0;
self.goodmoves_ratio = if self.step_count == 0 {
0.0
} else {
self.goodmoves_count as f32 / self.step_count as f32
};
println!(
"info: correct moves: {} ({}%)",
self.goodmoves_count,
(100.0 * self.goodmoves_ratio).round() as u32
);
self.step_count = 0;
self.pointrolls_count = 0;
self.goodmoves_count = 0;
Snapshot::new(self.current_state, 0.0, false)
}
fn step(&mut self, action: Self::ActionType) -> Snapshot<Self> {
self.step_count += 1;
// Convert the burn-rl action into a Trictrac action
let trictrac_action = Self::convert_action(action);
let mut reward = 0.0;
let is_rollpoint;
// Execute the action if it is the DQN agent's turn
if self.game.active_player_id == self.active_player_id {
if let Some(action) = trictrac_action {
(reward, is_rollpoint) = self.execute_action(action);
if is_rollpoint {
self.pointrolls_count += 1;
}
if reward != ERROR_REWARD {
self.goodmoves_count += 1;
// println!("{str_action}");
}
} else {
// Action could not be converted: apply a penalty
reward = -0.5;
}
}
// Let the opponent play (simple strategy)
while self.game.active_player_id == self.opponent_id && self.game.stage != Stage::Ended {
// print!(":");
reward += self.play_opponent_if_needed();
}
// Check whether the game is over
// let max_steps = self.max_steps
// let max_steps = self.min_steps
// + (self.max_steps as f32 - self.min_steps)
// * f32::exp((self.goodmoves_ratio - 1.0) / 0.25);
let done = self.game.stage == Stage::Ended || self.game.determine_winner().is_some();
if done {
// Final reward based on the game outcome
if let Some(winner_id) = self.game.determine_winner() {
if winner_id == self.active_player_id {
reward += WIN_POINTS; // Win
} else {
reward -= WIN_POINTS; // Loss
}
}
}
let terminated = done || self.step_count >= self.max_steps;
// Update the state
self.current_state = TrictracState::from_game_state(&self.game);
self.episode_reward += reward;
if self.visualized && terminated {
println!(
"Episode terminé. Récompense totale: {:.2}, Étapes: {}",
self.episode_reward, self.step_count
);
}
Snapshot::new(self.current_state, reward, terminated)
}
}
impl TrictracEnvironment {
/// Converts a burn-rl action into a Trictrac action
pub fn convert_action(action: TrictracAction) -> Option<training_common_big::TrictracAction> {
training_common_big::TrictracAction::from_action_index(action.index.try_into().unwrap())
}
/// Converts an index within the list of valid actions into a Trictrac action
#[allow(dead_code)]
fn convert_valid_action_index(
&self,
action: TrictracAction,
game_state: &GameState,
) -> Option<training_common_big::TrictracAction> {
use training_common_big::get_valid_actions;
// Get the valid actions in the current context
let valid_actions = get_valid_actions(game_state);
if valid_actions.is_empty() {
return None;
}
// Map the action index onto one of the valid actions
let action_index = (action.index as usize) % valid_actions.len();
Some(valid_actions[action_index].clone())
}
/// Executes a Trictrac action in the game
// fn execute_action(
// &mut self,
// action:training_common_big::TrictracAction,
// ) -> Result<f32, Box<dyn std::error::Error>> {
fn execute_action(&mut self, action: training_common_big::TrictracAction) -> (f32, bool) {
use training_common_big::TrictracAction;
let mut reward = 0.0;
let mut is_rollpoint = false;
let mut need_roll = false;
let event = match action {
TrictracAction::Roll => {
// Roll the dice
need_roll = true;
Some(GameEvent::Roll {
player_id: self.active_player_id,
})
}
// TrictracAction::Mark => {
// // Mark points
// let points = self.game.
// reward += 0.1 * points as f32;
// Some(GameEvent::Mark {
// player_id: self.active_player_id,
// points,
// })
// }
TrictracAction::Go => {
// Keep playing after winning a hole (trou)
Some(GameEvent::Go {
player_id: self.active_player_id,
})
}
TrictracAction::Move {
dice_order,
from1,
from2,
} => {
// Perform a move
let (dice1, dice2) = if dice_order {
(self.game.dice.values.0, self.game.dice.values.1)
} else {
(self.game.dice.values.1, self.game.dice.values.0)
};
let mut to1 = from1 + dice1 as usize;
let mut to2 = from2 + dice2 as usize;
// Handle taking the corner "par puissance" (by power)
let opp_rest_field = 13;
if to1 == opp_rest_field && to2 == opp_rest_field {
to1 -= 1;
to2 -= 1;
}
let checker_move1 = store::CheckerMove::new(from1, to1).unwrap_or_default();
let checker_move2 = store::CheckerMove::new(from2, to2).unwrap_or_default();
Some(GameEvent::Move {
player_id: self.active_player_id,
moves: (checker_move1, checker_move2),
})
}
};
// Apply the event if it is valid
if let Some(event) = event {
if self.game.validate(&event) {
self.game.consume(&event);
reward += REWARD_VALID_MOVE;
// Simulate the dice result after a Roll
// if matches!(action, TrictracAction::Roll) {
if need_roll {
let mut rng = thread_rng();
let dice_values = (rng.gen_range(1..=6), rng.gen_range(1..=6));
let dice_event = GameEvent::RollResult {
player_id: self.active_player_id,
dice: store::Dice {
values: dice_values,
},
};
// print!("o");
if self.game.validate(&dice_event) {
self.game.consume(&dice_event);
let (points, adv_points) = self.game.dice_points;
reward += REWARD_RATIO * (points - adv_points) as f32;
if points > 0 {
is_rollpoint = true;
// println!("info: rolled for {reward}");
}
// Reward proportional to the points scored
}
}
} else {
// Penalty for an invalid action:
// cancel the previously accumulated rewards
// and use a recognizable value for the statistics
reward = ERROR_REWARD;
self.game.mark_points_for_bot_training(self.opponent_id, 1);
}
}
(reward, is_rollpoint)
}
/// Makes the opponent play using a simple strategy
fn play_opponent_if_needed(&mut self) -> f32 {
// print!("z?");
let mut reward = 0.0;
// If it is the opponent's turn, play automatically
if self.game.active_player_id == self.opponent_id && self.game.stage != Stage::Ended {
// Use the default strategy for the opponent
use crate::BotStrategy;
let mut strategy = crate::strategy::random::RandomStrategy::default();
strategy.set_player_id(self.opponent_id);
if let Some(color) = self.game.player_color_by_id(&self.opponent_id) {
strategy.set_color(color);
}
*strategy.get_mut_game() = self.game.clone();
// Execute the action according to the turn_stage
let mut calculate_points = false;
let opponent_color = store::Color::Black;
let event = match self.game.turn_stage {
TurnStage::RollDice => GameEvent::Roll {
player_id: self.opponent_id,
},
TurnStage::RollWaiting => {
let mut rng = thread_rng();
let dice_values = (rng.gen_range(1..=6), rng.gen_range(1..=6));
calculate_points = true; // comment to replicate burnrl_before
GameEvent::RollResult {
player_id: self.opponent_id,
dice: store::Dice {
values: dice_values,
},
}
}
TurnStage::MarkPoints => {
panic!("in play_opponent_if_needed > TurnStage::MarkPoints");
// let dice_roll_count = self
// .game
// .players
// .get(&self.opponent_id)
// .unwrap()
// .dice_roll_count;
// let points_rules =
// PointsRules::new(&opponent_color, &self.game.board, self.game.dice);
// GameEvent::Mark {
// player_id: self.opponent_id,
// points: points_rules.get_points(dice_roll_count).0,
// }
}
TurnStage::MarkAdvPoints => {
let dice_roll_count = self
.game
.players
.get(&self.opponent_id)
.unwrap()
.dice_roll_count;
let points_rules =
PointsRules::new(&opponent_color, &self.game.board, self.game.dice);
// no reward: already counted during White's turn
GameEvent::Mark {
player_id: self.opponent_id,
points: points_rules.get_points(dice_roll_count).1,
}
}
TurnStage::HoldOrGoChoice => {
// Simple strategy: always keep going
GameEvent::Go {
player_id: self.opponent_id,
}
}
TurnStage::Move => GameEvent::Move {
player_id: self.opponent_id,
moves: strategy.choose_move(),
},
};
if self.game.validate(&event) {
self.game.consume(&event);
// print!(".");
if calculate_points {
// print!("x");
let dice_roll_count = self
.game
.players
.get(&self.opponent_id)
.unwrap()
.dice_roll_count;
let points_rules =
PointsRules::new(&opponent_color, &self.game.board, self.game.dice);
let (points, adv_points) = points_rules.get_points(dice_roll_count);
// Reward proportional to the points scored
let adv_reward = REWARD_RATIO * (points - adv_points) as f32;
reward -= adv_reward;
// if adv_reward != 0.0 {
// println!("info: opponent : {adv_reward} -> {reward}");
// }
}
}
}
reward
}
}
impl AsMut<TrictracEnvironment> for TrictracEnvironment {
fn as_mut(&mut self) -> &mut Self {
self
}
}

View file

@@ -1,9 +1,12 @@
use crate::training_common_big;
use crate::training_common;
use burn::{prelude::Backend, tensor::Tensor};
use burn_rl::base::{Action, Environment, Snapshot, State};
use rand::{thread_rng, Rng};
use store::{GameEvent, GameState, PlayerId, PointsRules, Stage, TurnStage};
const ERROR_REWARD: f32 = -1.0012121;
const REWARD_RATIO: f32 = 0.1;
/// Trictrac game state for burn-rl
#[derive(Debug, Clone, Copy)]
pub struct TrictracState {
@@ -214,16 +217,16 @@ impl TrictracEnvironment {
const REWARD_RATIO: f32 = 1.0;
/// Converts a burn-rl action into a Trictrac action
pub fn convert_action(action: TrictracAction) -> Option<training_common_big::TrictracAction> {
training_common_big::TrictracAction::from_action_index(action.index.try_into().unwrap())
pub fn convert_action(action: TrictracAction) -> Option<training_common::TrictracAction> {
training_common::TrictracAction::from_action_index(action.index.try_into().unwrap())
}
/// Converts an index within the list of valid actions into a Trictrac action
fn convert_valid_action_index(
&self,
action: TrictracAction,
) -> Option<training_common_big::TrictracAction> {
use training_common_big::get_valid_actions;
) -> Option<training_common::TrictracAction> {
use training_common::get_valid_actions;
// Get the valid actions in the current context
let valid_actions = get_valid_actions(&self.game);
@@ -240,72 +243,19 @@ impl TrictracEnvironment {
/// Executes a Trictrac action in the game
// fn execute_action(
// &mut self,
// action: training_common_big::TrictracAction,
// action: training_common::TrictracAction,
// ) -> Result<f32, Box<dyn std::error::Error>> {
fn execute_action(&mut self, action: training_common_big::TrictracAction) -> (f32, bool) {
use training_common_big::TrictracAction;
fn execute_action(&mut self, action: training_common::TrictracAction) -> (f32, bool) {
use training_common::TrictracAction;
let mut reward = 0.0;
let mut is_rollpoint = false;
let event = match action {
TrictracAction::Roll => {
// Roll the dice
Some(GameEvent::Roll {
player_id: self.active_player_id,
})
}
// TrictracAction::Mark => {
// // Mark points
// let points = self.game.
// reward += 0.1 * points as f32;
// Some(GameEvent::Mark {
// player_id: self.active_player_id,
// points,
// })
// }
TrictracAction::Go => {
// Keep playing after winning a hole (trou)
Some(GameEvent::Go {
player_id: self.active_player_id,
})
}
TrictracAction::Move {
dice_order,
from1,
from2,
} => {
// Perform a move
let (dice1, dice2) = if dice_order {
(self.game.dice.values.0, self.game.dice.values.1)
} else {
(self.game.dice.values.1, self.game.dice.values.0)
};
let mut to1 = from1 + dice1 as usize;
let mut to2 = from2 + dice2 as usize;
// Handle taking the corner "par puissance" (by power)
let opp_rest_field = 13;
if to1 == opp_rest_field && to2 == opp_rest_field {
to1 -= 1;
to2 -= 1;
}
let checker_move1 = store::CheckerMove::new(from1, to1).unwrap_or_default();
let checker_move2 = store::CheckerMove::new(from2, to2).unwrap_or_default();
Some(GameEvent::Move {
player_id: self.active_player_id,
moves: (checker_move1, checker_move2),
})
}
};
// Apply the event if it is valid
if let Some(event) = event {
if let Some(event) = action.to_event(&self.game) {
if self.game.validate(&event) {
self.game.consume(&event);
// reward += REWARD_VALID_MOVE;
// Simulate the dice result after a Roll
if matches!(action, TrictracAction::Roll) {
let mut rng = thread_rng();
@@ -319,7 +269,7 @@ impl TrictracEnvironment {
if self.game.validate(&dice_event) {
self.game.consume(&dice_event);
let (points, adv_points) = self.game.dice_points;
reward += Self::REWARD_RATIO * (points - adv_points) as f32;
reward += REWARD_RATIO * (points as f32 - adv_points as f32);
if points > 0 {
is_rollpoint = true;
// println!("info: rolled for {reward}");
@@ -331,9 +281,12 @@ impl TrictracEnvironment {
// Penalty for an invalid action:
// cancel the previously accumulated rewards
// and use a recognizable value for the statistics
println!("info: action invalide -> err_reward");
reward = Self::ERROR_REWARD;
reward = ERROR_REWARD;
self.game.mark_points_for_bot_training(self.opponent_id, 1);
}
} else {
reward = ERROR_REWARD;
self.game.mark_points_for_bot_training(self.opponent_id, 1);
}
(reward, is_rollpoint)

View file

@@ -1,8 +1,5 @@
use bot::burnrl::algos::{
dqn, dqn_big, dqn_valid, ppo, ppo_big, ppo_valid, sac, sac_big, sac_valid,
};
use bot::burnrl::algos::{dqn, dqn_valid, ppo, ppo_valid, sac, sac_valid};
use bot::burnrl::environment::TrictracEnvironment;
use bot::burnrl::environment_big::TrictracEnvironment as TrictracEnvironmentBig;
use bot::burnrl::environment_valid::TrictracEnvironment as TrictracEnvironmentValid;
use bot::burnrl::utils::{demo_model, Config};
use burn::backend::{Autodiff, NdArray};
@@ -36,16 +33,6 @@ fn main() {
println!("> Test avec le modèle chargé");
demo_model(loaded_agent);
}
"dqn_big" => {
let _agent = dqn_big::run::<TrictracEnvironmentBig, Backend>(&conf, false);
println!("> Chargement du modèle pour test");
let loaded_model = dqn_big::load_model(conf.dense_size, &path);
let loaded_agent: burn_rl::agent::DQN<TrictracEnvironmentBig, _, _> =
burn_rl::agent::DQN::new(loaded_model.unwrap());
println!("> Test avec le modèle chargé");
demo_model(loaded_agent);
}
"dqn_valid" => {
let _agent = dqn_valid::run::<TrictracEnvironmentValid, Backend>(&conf, false);
println!("> Chargement du modèle pour test");
@@ -66,16 +53,6 @@ fn main() {
println!("> Test avec le modèle chargé");
demo_model(loaded_agent);
}
"sac_big" => {
let _agent = sac_big::run::<TrictracEnvironmentBig, Backend>(&conf, false);
println!("> Chargement du modèle pour test");
let loaded_model = sac_big::load_model(conf.dense_size, &path);
let loaded_agent: burn_rl::agent::SAC<TrictracEnvironmentBig, _, _> =
burn_rl::agent::SAC::new(loaded_model.unwrap());
println!("> Test avec le modèle chargé");
demo_model(loaded_agent);
}
"sac_valid" => {
let _agent = sac_valid::run::<TrictracEnvironmentValid, Backend>(&conf, false);
println!("> Chargement du modèle pour test");
@@ -96,16 +73,6 @@ fn main() {
println!("> Test avec le modèle chargé");
demo_model(loaded_agent);
}
"ppo_big" => {
let _agent = ppo_big::run::<TrictracEnvironmentBig, Backend>(&conf, false);
println!("> Chargement du modèle pour test");
let loaded_model = ppo_big::load_model(conf.dense_size, &path);
let loaded_agent: burn_rl::agent::PPO<TrictracEnvironmentBig, _, _> =
burn_rl::agent::PPO::new(loaded_model.unwrap());
println!("> Test avec le modèle chargé");
demo_model(loaded_agent);
}
"ppo_valid" => {
let _agent = ppo_valid::run::<TrictracEnvironmentValid, Backend>(&conf, false);
println!("> Chargement du modèle pour test");

View file

@@ -1,5 +1,4 @@
pub mod algos;
pub mod environment;
pub mod environment_big;
pub mod environment_valid;
pub mod utils;