refacto

commit 2e0a874879 (parent ad58c0ec60)
21 changed files with 23 additions and 1051 deletions
@@ -1,149 +0,0 @@
use crate::burnrl::utils::soft_update_linear;
use burn::module::Module;
use burn::nn::{Linear, LinearConfig};
use burn::optim::AdamWConfig;
use burn::record::{CompactRecorder, Recorder};
use burn::tensor::activation::relu;
use burn::tensor::backend::{AutodiffBackend, Backend};
use burn::tensor::Tensor;
use burn_rl::agent::DQN;
use burn_rl::agent::{DQNModel, DQNTrainingConfig};
use burn_rl::base::{Action, Agent, ElemType, Environment, Memory, Model, State};
use std::time::{Duration, SystemTime};

#[derive(Module, Debug)]
pub struct Net<B: Backend> {
    linear_0: Linear<B>,
    linear_1: Linear<B>,
    linear_2: Linear<B>,
}

impl<B: Backend> Net<B> {
    #[allow(unused)]
    pub fn new(input_size: usize, dense_size: usize, output_size: usize) -> Self {
        Self {
            linear_0: LinearConfig::new(input_size, dense_size).init(&Default::default()),
            linear_1: LinearConfig::new(dense_size, dense_size).init(&Default::default()),
            linear_2: LinearConfig::new(dense_size, output_size).init(&Default::default()),
        }
    }

    fn consume(self) -> (Linear<B>, Linear<B>, Linear<B>) {
        (self.linear_0, self.linear_1, self.linear_2)
    }
}

impl<B: Backend> Model<B, Tensor<B, 2>, Tensor<B, 2>> for Net<B> {
    fn forward(&self, input: Tensor<B, 2>) -> Tensor<B, 2> {
        let layer_0_output = relu(self.linear_0.forward(input));
        let layer_1_output = relu(self.linear_1.forward(layer_0_output));

        relu(self.linear_2.forward(layer_1_output))
    }

    fn infer(&self, input: Tensor<B, 2>) -> Tensor<B, 2> {
        self.forward(input)
    }
}
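One detail worth flagging in this deleted model: `forward` passes the output layer through `relu` as well, so every predicted Q-value is clamped to be non-negative, even though the environment below hands out negative rewards (the -1.0/-0.5 invalid-action penalties, the -50.0 loss reward). A common DQN variant keeps the Q-value head linear; a minimal sketch of that alternative `forward`, same fields, not the committed code:

// Sketch only: same three-layer MLP, but without relu on the output head,
// so the network can represent negative action values.
fn forward(&self, input: Tensor<B, 2>) -> Tensor<B, 2> {
    let layer_0_output = relu(self.linear_0.forward(input));
    let layer_1_output = relu(self.linear_1.forward(layer_0_output));
    self.linear_2.forward(layer_1_output) // linear head: Q-values may be negative
}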

impl<B: Backend> DQNModel<B> for Net<B> {
    fn soft_update(this: Self, that: &Self, tau: ElemType) -> Self {
        let (linear_0, linear_1, linear_2) = this.consume();

        Self {
            linear_0: soft_update_linear(linear_0, &that.linear_0, tau),
            linear_1: soft_update_linear(linear_1, &that.linear_1, tau),
            linear_2: soft_update_linear(linear_2, &that.linear_2, tau),
        }
    }
}

#[allow(unused)]
const MEMORY_SIZE: usize = 4096;
const DENSE_SIZE: usize = 128;
const EPS_DECAY: f64 = 1000.0;
const EPS_START: f64 = 0.9;
const EPS_END: f64 = 0.05;

type MyAgent<E, B> = DQN<E, B, Net<B>>;

#[allow(unused)]
pub fn run<E: Environment, B: AutodiffBackend>(
    num_episodes: usize,
    visualized: bool,
) -> DQN<E, B, Net<B>> {
    // ) -> impl Agent<E> {
    let mut env = E::new(visualized);

    let model = Net::<B>::new(
        <<E as Environment>::StateType as State>::size(),
        DENSE_SIZE,
        <<E as Environment>::ActionType as Action>::size(),
    );

    let mut agent = MyAgent::new(model);
    let config = DQNTrainingConfig::default();
    let mut memory = Memory::<E, B, MEMORY_SIZE>::default();
    let mut optimizer = AdamWConfig::new()
        .with_grad_clipping(config.clip_grad.clone())
        .init();
    let mut policy_net = agent.model().as_ref().unwrap().clone();
    let mut step = 0_usize;

    for episode in 0..num_episodes {
        let mut episode_done = false;
        let mut episode_reward: ElemType = 0.0;
        let mut episode_duration = 0_usize;
        let mut state = env.state();
        let mut now = SystemTime::now();

        while !episode_done {
            let eps_threshold =
                EPS_END + (EPS_START - EPS_END) * f64::exp(-(step as f64) / EPS_DECAY);
            let action =
                DQN::<E, B, Net<B>>::react_with_exploration(&policy_net, state, eps_threshold);
            let snapshot = env.step(action);

            episode_reward +=
                <<E as Environment>::RewardType as Into<ElemType>>::into(snapshot.reward().clone());

            memory.push(
                state,
                *snapshot.state(),
                action,
                snapshot.reward().clone(),
                snapshot.done(),
            );

            if config.batch_size < memory.len() {
                policy_net =
                    agent.train::<MEMORY_SIZE>(policy_net, &memory, &mut optimizer, &config);
            }

            step += 1;
            episode_duration += 1;

            if snapshot.done() || episode_duration >= E::MAX_STEPS {
                env.reset();
                episode_done = true;

                println!(
                    "{{\"episode\": {}, \"reward\": {:.4}, \"steps count\": {}, \"duration\": {}}}",
                    episode,
                    episode_reward,
                    episode_duration,
                    now.elapsed().unwrap().as_secs()
                );
                now = SystemTime::now();
            } else {
                state = *snapshot.state();
            }
        }
    }
    agent
}
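The ε-greedy threshold computed inside the loop decays exponentially from EPS_START toward EPS_END. A standalone sketch of the same schedule (same constants as above), useful for eyeballing how fast exploration anneals:

// Sketch: eps = EPS_END + (EPS_START - EPS_END) * exp(-step / EPS_DECAY)
fn eps_threshold(step: usize) -> f64 {
    0.05 + (0.9 - 0.05) * f64::exp(-(step as f64) / 1000.0)
}

fn main() {
    for step in [0, 1000, 5000] {
        // step 0 -> 0.900, step 1000 -> ~0.363, step 5000 -> ~0.056
        println!("step {step}: eps = {:.3}", eps_threshold(step));
    }
}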
@@ -1,396 +0,0 @@
use crate::strategy::dqn_common;
use burn::{prelude::Backend, tensor::Tensor};
use burn_rl::base::{Action, Environment, Snapshot, State};
use rand::{thread_rng, Rng};
use store::{GameEvent, GameState, PlayerId, PointsRules, Stage, TurnStage};

/// Trictrac game state for burn-rl
#[derive(Debug, Clone, Copy)]
pub struct TrictracState {
    pub data: [f32; 36], // vector representation of the game state
}

impl State for TrictracState {
    type Data = [f32; 36];

    fn to_tensor<B: Backend>(&self) -> Tensor<B, 1> {
        Tensor::from_floats(self.data, &B::Device::default())
    }

    fn size() -> usize {
        36
    }
}

impl TrictracState {
    /// Converts a GameState into a TrictracState
    pub fn from_game_state(game_state: &GameState) -> Self {
        let state_vec = game_state.to_vec_float();
        let mut data = [0.0; 36];

        // Copy the data without overrunning the fixed-size buffer
        let copy_len = state_vec.len().min(36);
        data[..copy_len].copy_from_slice(&state_vec[..copy_len]);

        TrictracState { data }
    }
}
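Because the buffer is a fixed [f32; 36], a longer `to_vec_float()` result is silently truncated and a shorter one is zero-padded. A standalone illustration of that copy rule (sketch, using a hypothetical 4-slot buffer):

// Sketch: same truncate-or-zero-pad behaviour as from_game_state, on 4 slots.
fn pack(state_vec: &[f32]) -> [f32; 4] {
    let mut data = [0.0; 4];
    let copy_len = state_vec.len().min(4);
    data[..copy_len].copy_from_slice(&state_vec[..copy_len]);
    data
}

fn main() {
    assert_eq!(pack(&[1.0, 2.0]), [1.0, 2.0, 0.0, 0.0]); // zero-padded
    assert_eq!(pack(&[1.0, 2.0, 3.0, 4.0, 5.0]), [1.0, 2.0, 3.0, 4.0]); // truncated
}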

/// Possible Trictrac actions for burn-rl
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct TrictracAction {
    pub index: u32,
}

impl Action for TrictracAction {
    fn random() -> Self {
        let mut rng = thread_rng();
        TrictracAction {
            index: rng.gen_range(0..Self::size() as u32),
        }
    }

    fn enumerate() -> Vec<Self> {
        (0..Self::size() as u32)
            .map(|index| TrictracAction { index })
            .collect()
    }

    fn size() -> usize {
        1252
    }
}

impl From<u32> for TrictracAction {
    fn from(index: u32) -> Self {
        TrictracAction { index }
    }
}

impl From<TrictracAction> for u32 {
    fn from(action: TrictracAction) -> u32 {
        action.index
    }
}

/// Trictrac environment for burn-rl
#[derive(Debug)]
pub struct TrictracEnvironment {
    pub game: GameState,
    active_player_id: PlayerId,
    opponent_id: PlayerId,
    current_state: TrictracState,
    episode_reward: f32,
    step_count: usize,
    pub visualized: bool,
}

impl Environment for TrictracEnvironment {
    type StateType = TrictracState;
    type ActionType = TrictracAction;
    type RewardType = f32;

    const MAX_STEPS: usize = 1000; // hard cap to avoid endless games
    // const MAX_STEPS: usize = 5;

    fn new(visualized: bool) -> Self {
        let mut game = GameState::new(false);

        // Add both players
        game.init_player("DQN Agent");
        game.init_player("Opponent");
        let player1_id = 1;
        let player2_id = 2;

        // Start the game
        game.consume(&GameEvent::BeginGame { goes_first: 1 });

        let current_state = TrictracState::from_game_state(&game);
        TrictracEnvironment {
            game,
            active_player_id: player1_id,
            opponent_id: player2_id,
            current_state,
            episode_reward: 0.0,
            step_count: 0,
            visualized,
        }
    }

    fn state(&self) -> Self::StateType {
        self.current_state
    }

    fn reset(&mut self) -> Snapshot<Self> {
        // Reset the game
        self.game = GameState::new(false);
        self.game.init_player("DQN Agent");
        self.game.init_player("Opponent");

        // Start the game
        self.game.consume(&GameEvent::BeginGame { goes_first: 1 });

        self.current_state = TrictracState::from_game_state(&self.game);
        self.episode_reward = 0.0;
        self.step_count = 0;

        Snapshot::new(self.current_state, 0.0, false)
    }

    fn step(&mut self, action: Self::ActionType) -> Snapshot<Self> {
        self.step_count += 1;

        // Convert the burn-rl action into a Trictrac action
        let trictrac_action = self.convert_action(action, &self.game);

        let mut reward = 0.0;
        let mut terminated = false;

        // Execute the action if it is the DQN agent's turn
        if self.game.active_player_id == self.active_player_id {
            if let Some(action) = trictrac_action {
                match self.execute_action(action) {
                    Ok(action_reward) => {
                        reward = action_reward;
                    }
                    Err(_) => {
                        // Invalid action: penalty
                        reward = -1.0;
                    }
                }
            } else {
                // Action could not be converted: penalty
                reward = -0.5;
            }
        }

        // Let the opponent play (simple strategy)
        while self.game.active_player_id == self.opponent_id && self.game.stage != Stage::Ended {
            reward += self.play_opponent_if_needed();
        }

        // Check whether the game is over
        let done = self.game.stage == Stage::Ended
            || self.game.determine_winner().is_some()
            || self.step_count >= Self::MAX_STEPS;

        if done {
            terminated = true;
            // Final reward based on the outcome
            if let Some(winner_id) = self.game.determine_winner() {
                if winner_id == self.active_player_id {
                    reward += 100.0; // win
                } else {
                    reward -= 50.0; // loss
                }
            }
        }

        // Update the state
        self.current_state = TrictracState::from_game_state(&self.game);
        self.episode_reward += reward;

        if self.visualized && terminated {
            println!(
                "Episode finished. Total reward: {:.2}, steps: {}",
                self.episode_reward, self.step_count
            );
        }

        Snapshot::new(self.current_state, reward, terminated)
    }
}

impl TrictracEnvironment {
    /// Converts a burn-rl action into a Trictrac action
    fn convert_action(
        &self,
        action: TrictracAction,
        _game_state: &GameState,
    ) -> Option<dqn_common::TrictracAction> {
        dqn_common::TrictracAction::from_action_index(action.index.try_into().unwrap())
    }

    /// Maps an index into the current list of valid actions to a Trictrac action
    fn convert_valid_action_index(
        &self,
        action: TrictracAction,
        game_state: &GameState,
    ) -> Option<dqn_common::TrictracAction> {
        use dqn_common::get_valid_actions;

        // Get the actions that are valid in the current position
        let valid_actions = get_valid_actions(game_state);

        if valid_actions.is_empty() {
            return None;
        }

        // Map the action index onto one of the valid actions
        let action_index = (action.index as usize) % valid_actions.len();
        Some(valid_actions[action_index].clone())
    }
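    // Editor's note: the two converters interpret the network output differently.
    // convert_action (the one step() uses) reads the index as a position in the
    // full 1252-action space, while the unused convert_valid_action_index wraps
    // the index onto whatever is currently legal, so the same index can denote a
    // different move in every position. A tiny standalone sketch of that
    // wrapping, with hypothetical stand-in actions:
    //
    // fn main() {
    //     let valid_actions = ["Roll", "Go", "Move"]; // stand-ins
    //     for raw_index in [0u32, 2, 3, 100] {
    //         let mapped = (raw_index as usize) % valid_actions.len();
    //         println!("raw {raw_index} -> {}", valid_actions[mapped]);
    //         // 0 -> Roll, 2 -> Move, 3 -> Roll, 100 -> Go
    //     }
    // }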

    /// Executes a Trictrac action in the game
    fn execute_action(
        &mut self,
        action: dqn_common::TrictracAction,
    ) -> Result<f32, Box<dyn std::error::Error>> {
        use dqn_common::TrictracAction;

        let mut reward = 0.0;

        let event = match action {
            TrictracAction::Roll => {
                // Roll the dice
                reward += 0.1;
                Some(GameEvent::Roll {
                    player_id: self.active_player_id,
                })
            }
            // TrictracAction::Mark => {
            //     // Mark points
            //     let points = self.game.
            //     reward += 0.1 * points as f32;
            //     Some(GameEvent::Mark {
            //         player_id: self.active_player_id,
            //         points,
            //     })
            // }
            TrictracAction::Go => {
                // Keep going after winning a hole
                reward += 0.2;
                Some(GameEvent::Go {
                    player_id: self.active_player_id,
                })
            }
            TrictracAction::Move {
                dice_order,
                from1,
                from2,
            } => {
                // Perform a move
                let (dice1, dice2) = if dice_order {
                    (self.game.dice.values.0, self.game.dice.values.1)
                } else {
                    (self.game.dice.values.1, self.game.dice.values.0)
                };
                let mut to1 = from1 + dice1 as usize;
                let mut to2 = from2 + dice2 as usize;

                // Handle taking the corner "par puissance"
                let opp_rest_field = 13;
                if to1 == opp_rest_field && to2 == opp_rest_field {
                    to1 -= 1;
                    to2 -= 1;
                }

                let checker_move1 = store::CheckerMove::new(from1, to1).unwrap_or_default();
                let checker_move2 = store::CheckerMove::new(from2, to2).unwrap_or_default();

                reward += 0.2;
                Some(GameEvent::Move {
                    player_id: self.active_player_id,
                    moves: (checker_move1, checker_move2),
                })
            }
        };

        // Apply the event if it is valid
        if let Some(event) = event {
            if self.game.validate(&event) {
                self.game.consume(&event);

                // Simulate the dice result after a Roll
                if matches!(action, TrictracAction::Roll) {
                    let mut rng = thread_rng();
                    let dice_values = (rng.gen_range(1..=6), rng.gen_range(1..=6));
                    let dice_event = GameEvent::RollResult {
                        player_id: self.active_player_id,
                        dice: store::Dice {
                            values: dice_values,
                        },
                    };
                    if self.game.validate(&dice_event) {
                        self.game.consume(&dice_event);
                    }
                }
            } else {
                // Penalty for an invalid action
                reward -= 2.0;
            }
        }

        Ok(reward)
    }

    /// Plays the opponent's turn with a simple strategy
    fn play_opponent_if_needed(&mut self) -> f32 {
        let mut reward = 0.0;

        // If it is the opponent's turn, play automatically
        if self.game.active_player_id == self.opponent_id && self.game.stage != Stage::Ended {
            // Use the default strategy for the opponent
            use crate::strategy::default::DefaultStrategy;
            use crate::BotStrategy;

            let mut default_strategy = DefaultStrategy::default();
            default_strategy.set_player_id(self.opponent_id);
            if let Some(color) = self.game.player_color_by_id(&self.opponent_id) {
                default_strategy.set_color(color);
            }
            *default_strategy.get_mut_game() = self.game.clone();

            // Build the event matching the current turn stage
            let event = match self.game.turn_stage {
                TurnStage::RollDice => GameEvent::Roll {
                    player_id: self.opponent_id,
                },
                TurnStage::RollWaiting => {
                    let mut rng = thread_rng();
                    let dice_values = (rng.gen_range(1..=6), rng.gen_range(1..=6));
                    GameEvent::RollResult {
                        player_id: self.opponent_id,
                        dice: store::Dice {
                            values: dice_values,
                        },
                    }
                }
                TurnStage::MarkAdvPoints | TurnStage::MarkPoints => {
                    let opponent_color = store::Color::Black;
                    let dice_roll_count = self
                        .game
                        .players
                        .get(&self.opponent_id)
                        .unwrap()
                        .dice_roll_count;
                    let points_rules =
                        PointsRules::new(&opponent_color, &self.game.board, self.game.dice);
                    let points = points_rules.get_points(dice_roll_count).0;
                    reward -= 0.3 * points as f32; // penalty proportional to the opponent's points

                    GameEvent::Mark {
                        player_id: self.opponent_id,
                        points,
                    }
                }
                TurnStage::HoldOrGoChoice => {
                    // Simple strategy: always keep going
                    GameEvent::Go {
                        player_id: self.opponent_id,
                    }
                }
                TurnStage::Move => GameEvent::Move {
                    player_id: self.opponent_id,
                    moves: default_strategy.choose_move(),
                },
            };

            if self.game.validate(&event) {
                self.game.consume(&event);
            }
        }
        reward
    }
}

@@ -1,63 +0,0 @@
use bot::burnrl::{dqn_model, environment, utils::demo_model};
use burn::backend::{ndarray::NdArrayDevice, Autodiff, NdArray};
use burn::module::Module;
use burn::record::{CompactRecorder, Recorder};
use burn_rl::agent::DQN;
use burn_rl::base::{Action, Agent, ElemType, Environment, State};

type Backend = Autodiff<NdArray<ElemType>>;
type Env = environment::TrictracEnvironment;

fn main() {
    println!("> Training");
    let num_episodes = 50;
    let agent = dqn_model::run::<Env, Backend>(num_episodes, false); //true);

    let valid_agent = agent.valid();

    println!("> Saving the validation model");

    let path = "models/burn_dqn_50".to_string();
    save_model(valid_agent.model().as_ref().unwrap(), &path);

    // println!("> Testing with the trained model");
    // demo_model::<Env>(valid_agent);

    println!("> Loading the model for testing");
    let loaded_model = load_model(&path);
    let loaded_agent = DQN::new(loaded_model);

    println!("> Testing with the loaded model");
    demo_model(loaded_agent);
}

fn save_model(model: &dqn_model::Net<NdArray<ElemType>>, path: &String) {
    let recorder = CompactRecorder::new();
    let model_path = format!("{}_model.mpk", path);
    println!("Validation model saved to: {}", model_path);
    recorder
        .record(model.clone().into_record(), model_path.into())
        .unwrap();
}

fn load_model(path: &String) -> dqn_model::Net<NdArray<ElemType>> {
    // TODO: reuse DENSE_SIZE from dqn_model.rs
    const DENSE_SIZE: usize = 128;

    let model_path = format!("{}_model.mpk", path);
    println!("Loading the model from: {}", model_path);

    let device = NdArrayDevice::default();
    let recorder = CompactRecorder::new();

    let record = recorder
        .load(model_path.into(), &device)
        .expect("Failed to load the model");

    dqn_model::Net::new(
        <environment::TrictracEnvironment as Environment>::StateType::size(),
        DENSE_SIZE,
        <environment::TrictracEnvironment as Environment>::ActionType::size(),
    )
    .load_record(record)
}
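The TODO in load_model flags the duplicated hidden-layer size: the network is rebuilt here with a locally re-declared DENSE_SIZE that must stay in sync with dqn_model.rs. One way to resolve it, sketched under the assumption that dqn_model makes its constant public:

// Sketch only: share the layer size instead of re-declaring it in main.rs.
mod dqn_model {
    pub const DENSE_SIZE: usize = 128; // single source of truth, made pub
}

fn main() {
    // load_model would then refer to the exported constant:
    let dense_size = dqn_model::DENSE_SIZE;
    println!("hidden layer size: {dense_size}");
}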

@@ -1,3 +0,0 @@
pub mod dqn_model;
pub mod environment;
pub mod utils;
@@ -1,83 +0,0 @@
use crate::burnrl::environment::{TrictracAction, TrictracEnvironment};
use crate::strategy::dqn_common::get_valid_action_indices;
use burn::module::{Param, ParamId};
use burn::nn::Linear;
use burn::tensor::backend::Backend;
use burn::tensor::Tensor;
use burn_rl::agent::{DQNModel, DQN};
use burn_rl::base::{ElemType, Environment, State};

pub fn demo_model<B: Backend, M: DQNModel<B>>(agent: DQN<TrictracEnvironment, B, M>) {
    let mut env = TrictracEnvironment::new(true);
    let mut done = false;
    while !done {
        // let action = match infer_action(&agent, &env, state) {
        let action = match infer_action(&agent, &env) {
            Some(value) => value,
            None => break,
        };
        // Execute the action
        let snapshot = env.step(action);
        done = snapshot.done();
    }
}

fn infer_action<B: Backend, M: DQNModel<B>>(
    agent: &DQN<TrictracEnvironment, B, M>,
    env: &TrictracEnvironment,
) -> Option<TrictracAction> {
    let state = env.state();
    // Get the q-values
    let q_values = agent
        .model()
        .as_ref()
        .unwrap()
        .infer(state.to_tensor().unsqueeze());
    // Get the valid actions
    let valid_actions_indices = get_valid_action_indices(&env.game);
    if valid_actions_indices.is_empty() {
        return None; // no valid actions, end of episode
    }
    // Pick the valid action with the highest q-value. Selecting by position
    // (rather than masking by value equality, as before) avoids knocking out a
    // valid action that happens to share a q-value with an invalid one.
    let q_values_vec: Vec<f32> = q_values.into_data().into_vec().unwrap();
    let action_index = valid_actions_indices
        .iter()
        .copied()
        .max_by(|&a, &b| q_values_vec[a].total_cmp(&q_values_vec[b]))?;
    Some(TrictracAction::from(action_index as u32))
}

fn soft_update_tensor<const N: usize, B: Backend>(
    this: &Param<Tensor<B, N>>,
    that: &Param<Tensor<B, N>>,
    tau: ElemType,
) -> Param<Tensor<B, N>> {
    let that_weight = that.val();
    let this_weight = this.val();
    // Polyak averaging: new = (1 - tau) * this + tau * that
    let new_weight = this_weight * (1.0 - tau) + that_weight * tau;

    Param::initialized(ParamId::new(), new_weight)
}

pub fn soft_update_linear<B: Backend>(
    this: Linear<B>,
    that: &Linear<B>,
    tau: ElemType,
) -> Linear<B> {
    let weight = soft_update_tensor(&this.weight, &that.weight, tau);
    let bias = match (&this.bias, &that.bias) {
        (Some(this_bias), Some(that_bias)) => Some(soft_update_tensor(this_bias, that_bias, tau)),
        _ => None,
    };

    Linear::<B> { weight, bias }
}
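As a quick sanity check of the soft update above, here is a standalone sketch of the same formula with scalar weights instead of tensors, using an illustrative tau of 0.005: each update moves the target parameters 0.5% of the way toward the policy parameters.

// Sketch: scalar version of soft_update_tensor's Polyak formula.
fn main() {
    let tau = 0.005_f32;
    let (target_w, policy_w) = (0.2_f32, 0.8_f32);
    let updated = target_w * (1.0 - tau) + policy_w * tau;
    assert!((updated - 0.203).abs() < 1e-6);
    println!("updated target weight: {updated}"); // 0.203, nudged toward 0.8
}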