refactor: migrate sqlx + sqlite to tokio-postgres

This commit is contained in:
Henri Bourcereau 2026-04-22 21:36:56 +02:00
parent 4f5e21becb
commit 03b614c62e
12 changed files with 838 additions and 250 deletions

607
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -13,6 +13,16 @@ members = [
"spiel_bot", "spiel_bot",
] ]
default-members = [
"store",
"clients/cli",
"clients/backbone-lib",
"server/protocol",
"server/relay-server",
"bot",
"spiel_bot",
]
# For the server we will need opt-level='3' # For the server we will need opt-level='3'
[profile.release] [profile.release]
opt-level = 'z' # Minimum space opt-level = 'z' # Minimum space

View file

@ -3,10 +3,10 @@
"devenv": { "devenv": {
"locked": { "locked": {
"dir": "src/modules", "dir": "src/modules",
"lastModified": 1770390537, "lastModified": 1776863933,
"owner": "cachix", "owner": "cachix",
"repo": "devenv", "repo": "devenv",
"rev": "d6f45cc00829254a9a6f8807c8fbfaf3efa7e629", "rev": "863b4204725efaeeb73811e376f928232b720646",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -40,10 +40,10 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1769939035, "lastModified": 1776796298,
"owner": "cachix", "owner": "cachix",
"repo": "git-hooks.nix", "repo": "git-hooks.nix",
"rev": "a8ca480175326551d6c4121498316261cbb5b260", "rev": "3cfd774b0a530725a077e17354fbdb87ea1c4aad",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -74,10 +74,10 @@
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1770136044, "lastModified": 1776734388,
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "e576e3c9cf9bad747afcddd9e34f51d18c855b4e", "rev": "10e7ad5bbcb421fe07e3a4ad53a634b0cd57ffac",
"type": "github" "type": "github"
}, },
"original": { "original": {

View file

@ -8,7 +8,9 @@ in
# for Leptos # for Leptos
pkgs.trunk pkgs.trunk
pkgs.lld pkgs.lld
# pkgs.wasm-bindgen-cli_0_2_114
# for backbone-lib
pkgs.wasm-bindgen-cli_0_2_114
pkgs.binaryen # for wasm-opt pkgs.binaryen # for wasm-opt
# pour burn-rs # pour burn-rs
@ -25,6 +27,13 @@ in
]; ];
services.postgres = {
enable = true;
listen_addresses = "*";
# port = 5432;
initialDatabases = [{ name = "trictrac"; user = "trictrac"; pass = "trictrac"; }];
};
# https://devenv.sh/languages/ # https://devenv.sh/languages/
languages.rust.enable = true; languages.rust.enable = true;

View file

@ -4,7 +4,7 @@ version = "0.1.0"
edition = "2024" edition = "2024"
[dependencies] [dependencies]
tokio = {version = "1.48.0", features = ["full"]} tokio = { version = "1.48.0", features = ["full"] }
axum = { version = "0.8.7", features = ["ws"] } axum = { version = "0.8.7", features = ["ws"] }
tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-subscriber = { version = "0.3", features = ["env-filter"] }
serde = { version = "1.0.228", features = ["derive"] } serde = { version = "1.0.228", features = ["derive"] }
@ -14,16 +14,14 @@ postcard = "1.1.3"
bytes = "1.11.0" bytes = "1.11.0"
tracing = "0.1.41" tracing = "0.1.41"
tower-http = { version = "0.6.7", features = ["fs", "cors"] } tower-http = { version = "0.6.7", features = ["fs", "cors"] }
protocol = {path = "../protocol"} protocol = { path = "../protocol" }
rand = "0.8" rand = "0.8"
# User management / auth # User management / auth
sqlx = { version = "0.8", features = ["sqlite", "runtime-tokio", "migrate"] } tokio-postgres = "0.7"
deadpool-postgres = { version = "0.14", features = ["rt_tokio_1"] }
tower-sessions = "0.14" tower-sessions = "0.14"
tower-sessions-sqlx-store = { version = "0.15", features = ["sqlite"] }
axum-login = "0.18" axum-login = "0.18"
argon2 = "0.5" argon2 = "0.5"
time = "0.3" time = "0.3"
thiserror = "1" thiserror = "1"

View file

@ -1,10 +1,6 @@
[ [
{ {
"name" : "tic-tac-toe", "name": "trictrac",
"max_players" : 10 "max_players": 10
},
{
"name" : "Ternio",
"max_players" : 3
} }
] ]

View file

@ -1,24 +1,24 @@
CREATE TABLE IF NOT EXISTS users ( CREATE TABLE IF NOT EXISTS users (
id INTEGER PRIMARY KEY AUTOINCREMENT, id BIGSERIAL PRIMARY KEY,
username TEXT NOT NULL UNIQUE, username TEXT NOT NULL UNIQUE,
email TEXT NOT NULL UNIQUE, email TEXT NOT NULL UNIQUE,
password_hash TEXT NOT NULL, password_hash TEXT NOT NULL,
created_at INTEGER NOT NULL created_at BIGINT NOT NULL
); );
CREATE TABLE IF NOT EXISTS game_records ( CREATE TABLE IF NOT EXISTS game_records (
id INTEGER PRIMARY KEY AUTOINCREMENT, id BIGSERIAL PRIMARY KEY,
game_id TEXT NOT NULL, game_id TEXT NOT NULL,
room_code TEXT NOT NULL, room_code TEXT NOT NULL,
started_at INTEGER NOT NULL, started_at BIGINT NOT NULL,
ended_at INTEGER, ended_at BIGINT,
result TEXT result TEXT
); );
CREATE TABLE IF NOT EXISTS game_participants ( CREATE TABLE IF NOT EXISTS game_participants (
id INTEGER PRIMARY KEY AUTOINCREMENT, id BIGSERIAL PRIMARY KEY,
game_record_id INTEGER NOT NULL REFERENCES game_records(id), game_record_id BIGINT NOT NULL REFERENCES game_records(id),
user_id INTEGER REFERENCES users(id), user_id BIGINT REFERENCES users(id),
player_id INTEGER NOT NULL, player_id BIGINT NOT NULL,
outcome TEXT outcome TEXT
); );

View file

@ -7,7 +7,7 @@ use argon2::password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, Salt
use argon2::password_hash::rand_core::OsRng; use argon2::password_hash::rand_core::OsRng;
use argon2::Argon2; use argon2::Argon2;
use axum_login::{AuthUser, AuthnBackend, UserId}; use axum_login::{AuthUser, AuthnBackend, UserId};
use sqlx::SqlitePool; use deadpool_postgres::Pool;
use crate::db; use crate::db;
@ -39,7 +39,7 @@ pub struct Credentials {
#[derive(Debug, thiserror::Error)] #[derive(Debug, thiserror::Error)]
pub enum AuthError { pub enum AuthError {
#[error("database error: {0}")] #[error("database error: {0}")]
Database(#[from] sqlx::Error), Database(#[from] db::DbError),
#[error("password hashing error")] #[error("password hashing error")]
PasswordHash, PasswordHash,
} }
@ -48,11 +48,11 @@ pub enum AuthError {
#[derive(Clone)] #[derive(Clone)]
pub struct AuthBackend { pub struct AuthBackend {
pool: SqlitePool, pool: Pool,
} }
impl AuthBackend { impl AuthBackend {
pub fn new(pool: SqlitePool) -> Self { pub fn new(pool: Pool) -> Self {
Self { pool } Self { pool }
} }
} }

View file

@ -1,14 +1,14 @@
//! Database access layer. //! Database access layer.
//! //!
//! All SQLite interaction is funnelled through this module. Functions return //! All PostgreSQL interaction is funnelled through this module. Functions return
//! `sqlx::Result` so callers can handle errors uniformly. //! `Result<_, DbError>` so callers can handle errors uniformly.
use sqlx::sqlite::SqliteConnectOptions; use deadpool_postgres::{Manager, ManagerConfig, Pool, RecyclingMethod};
use sqlx::{SqlitePool, pool::PoolOptions}; use tokio_postgres::{NoTls, error::SqlState};
use std::time::{SystemTime, UNIX_EPOCH}; use std::time::{SystemTime, UNIX_EPOCH};
/// A registered user as stored in the database. /// A registered user as stored in the database.
#[derive(Clone, Debug, sqlx::FromRow)] #[derive(Clone, Debug)]
pub struct User { pub struct User {
pub id: i64, pub id: i64,
pub username: String, pub username: String,
@ -18,7 +18,6 @@ pub struct User {
} }
/// Aggregated game statistics for a user's public profile. /// Aggregated game statistics for a user's public profile.
#[derive(sqlx::FromRow)]
pub struct UserStats { pub struct UserStats {
pub total: i64, pub total: i64,
pub wins: i64, pub wins: i64,
@ -27,7 +26,6 @@ pub struct UserStats {
} }
/// A condensed game entry returned by [`get_user_games`]. /// A condensed game entry returned by [`get_user_games`].
#[derive(sqlx::FromRow)]
pub struct GameSummary { pub struct GameSummary {
pub id: i64, pub id: i64,
pub game_id: String, pub game_id: String,
@ -38,6 +36,24 @@ pub struct GameSummary {
pub outcome: Option<String>, pub outcome: Option<String>,
} }
#[derive(Debug, thiserror::Error)]
pub enum DbError {
#[error("connection pool error: {0}")]
Pool(#[from] deadpool_postgres::PoolError),
#[error("database error: {0}")]
Db(#[from] tokio_postgres::Error),
}
impl DbError {
pub fn is_unique_violation(&self) -> bool {
if let DbError::Db(e) = self {
e.code() == Some(&SqlState::UNIQUE_VIOLATION)
} else {
false
}
}
}
fn now_unix() -> i64 { fn now_unix() -> i64 {
SystemTime::now() SystemTime::now()
.duration_since(UNIX_EPOCH) .duration_since(UNIX_EPOCH)
@ -45,34 +61,28 @@ fn now_unix() -> i64 {
.as_secs() as i64 .as_secs() as i64
} }
/// Opens (or creates) the SQLite database at `path` and runs all pending migrations. /// Connects to the PostgreSQL database at `url` and runs all pending migrations.
pub async fn init_db(path: &str) -> SqlitePool { pub async fn init_db(url: &str) -> Pool {
if let Some(parent) = std::path::Path::new(path).parent() { let pg_config: tokio_postgres::Config = url.parse().expect("Invalid DATABASE_URL");
if !parent.as_os_str().is_empty() { let manager = Manager::from_config(
tokio::fs::create_dir_all(parent) pg_config,
.await NoTls,
.expect("Failed to create database directory"); ManagerConfig { recycling_method: RecyclingMethod::Fast },
} );
} let pool = Pool::builder(manager)
.max_size(5)
.build()
.expect("Failed to build connection pool");
let pool = PoolOptions::<sqlx::Sqlite>::new() let client = pool.get().await.expect("Failed to get connection for migrations");
.max_connections(5) client
.connect_with( .batch_execute(include_str!("../migrations/001_init.sql"))
SqliteConnectOptions::new()
.filename(path)
.create_if_missing(true),
)
.await .await
.expect("Failed to open SQLite database"); .expect("Migration 001 failed");
client
sqlx::migrate::Migrator::new( .batch_execute(include_str!("../migrations/002_participants_unique.sql"))
std::path::Path::new(concat!(env!("CARGO_MANIFEST_DIR"), "/migrations")), .await
) .expect("Migration 002 failed");
.await
.expect("Failed to locate migrations directory")
.run(&pool)
.await
.expect("Failed to run database migrations");
pool pool
} }
@ -80,135 +90,164 @@ pub async fn init_db(path: &str) -> SqlitePool {
// ── Users ──────────────────────────────────────────────────────────────────── // ── Users ────────────────────────────────────────────────────────────────────
pub async fn create_user( pub async fn create_user(
pool: &SqlitePool, pool: &Pool,
username: &str, username: &str,
email: &str, email: &str,
password_hash: &str, password_hash: &str,
) -> sqlx::Result<i64> { ) -> Result<i64, DbError> {
let id = sqlx::query( let client = pool.get().await?;
"INSERT INTO users (username, email, password_hash, created_at) VALUES (?, ?, ?, ?)", let row = client
) .query_one(
.bind(username) "INSERT INTO users (username, email, password_hash, created_at) \
.bind(email) VALUES ($1, $2, $3, $4) RETURNING id",
.bind(password_hash) &[&username, &email, &password_hash, &now_unix()],
.bind(now_unix()) )
.execute(pool) .await?;
.await? Ok(row.get(0))
.last_insert_rowid();
Ok(id)
} }
pub async fn get_user_by_id(pool: &SqlitePool, id: i64) -> sqlx::Result<Option<User>> { pub async fn get_user_by_id(pool: &Pool, id: i64) -> Result<Option<User>, DbError> {
sqlx::query_as::<_, User>( let client = pool.get().await?;
"SELECT id, username, email, password_hash, created_at FROM users WHERE id = ?", let row = client
) .query_opt(
.bind(id) "SELECT id, username, email, password_hash, created_at FROM users WHERE id = $1",
.fetch_optional(pool) &[&id],
.await )
.await?;
Ok(row.map(|r| User {
id: r.get("id"),
username: r.get("username"),
email: r.get("email"),
password_hash: r.get("password_hash"),
created_at: r.get("created_at"),
}))
} }
pub async fn get_user_by_username(pool: &SqlitePool, username: &str) -> sqlx::Result<Option<User>> { pub async fn get_user_by_username(pool: &Pool, username: &str) -> Result<Option<User>, DbError> {
sqlx::query_as::<_, User>( let client = pool.get().await?;
"SELECT id, username, email, password_hash, created_at FROM users WHERE username = ?", let row = client
) .query_opt(
.bind(username) "SELECT id, username, email, password_hash, created_at FROM users WHERE username = $1",
.fetch_optional(pool) &[&username],
.await )
.await?;
Ok(row.map(|r| User {
id: r.get("id"),
username: r.get("username"),
email: r.get("email"),
password_hash: r.get("password_hash"),
created_at: r.get("created_at"),
}))
} }
// ── Game records ───────────────────────────────────────────────────────────── // ── Game records ─────────────────────────────────────────────────────────────
/// Creates a new game record when a room opens. Returns the record id. /// Creates a new game record when a room opens. Returns the record id.
pub async fn insert_game_record( pub async fn insert_game_record(
pool: &SqlitePool, pool: &Pool,
game_id: &str, game_id: &str,
room_code: &str, room_code: &str,
) -> sqlx::Result<i64> { ) -> Result<i64, DbError> {
let id = sqlx::query( let client = pool.get().await?;
"INSERT INTO game_records (game_id, room_code, started_at) VALUES (?, ?, ?)", let row = client
) .query_one(
.bind(game_id) "INSERT INTO game_records (game_id, room_code, started_at) \
.bind(room_code) VALUES ($1, $2, $3) RETURNING id",
.bind(now_unix()) &[&game_id, &room_code, &now_unix()],
.execute(pool) )
.await? .await?;
.last_insert_rowid(); Ok(row.get(0))
Ok(id)
} }
/// Stamps `ended_at` and stores the opaque result JSON supplied by the game. /// Stamps `ended_at` and stores the opaque result JSON supplied by the game.
pub async fn close_game_record( pub async fn close_game_record(
pool: &SqlitePool, pool: &Pool,
record_id: i64, record_id: i64,
result_json: Option<&str>, result_json: Option<&str>,
) -> sqlx::Result<()> { ) -> Result<(), DbError> {
// AND ended_at IS NULL prevents overwriting a result already set by POST /games/result // AND ended_at IS NULL prevents overwriting a result already set by POST /games/result
sqlx::query( let client = pool.get().await?;
"UPDATE game_records SET ended_at = ?, result = ? WHERE id = ? AND ended_at IS NULL", client
) .execute(
.bind(now_unix()) "UPDATE game_records SET ended_at = $1, result = $2 \
.bind(result_json) WHERE id = $3 AND ended_at IS NULL",
.bind(record_id) &[&now_unix(), &result_json, &record_id],
.execute(pool) )
.await?; .await?;
Ok(()) Ok(())
} }
/// Records a player's participation in a game. `user_id` is `None` for anonymous players. /// Records a player's participation in a game. `user_id` is `None` for anonymous players.
pub async fn insert_participant( pub async fn insert_participant(
pool: &SqlitePool, pool: &Pool,
record_id: i64, record_id: i64,
user_id: Option<i64>, user_id: Option<i64>,
player_id: u16, player_id: u16,
outcome: Option<&str>, outcome: Option<&str>,
) -> sqlx::Result<()> { ) -> Result<(), DbError> {
sqlx::query( let client = pool.get().await?;
"INSERT OR IGNORE INTO game_participants (game_record_id, user_id, player_id, outcome) client
VALUES (?, ?, ?, ?)", .execute(
) "INSERT INTO game_participants (game_record_id, user_id, player_id, outcome) \
.bind(record_id) VALUES ($1, $2, $3, $4) ON CONFLICT DO NOTHING",
.bind(user_id) &[&record_id, &user_id, &(player_id as i64), &outcome],
.bind(player_id as i64) )
.bind(outcome) .await?;
.execute(pool)
.await?;
Ok(()) Ok(())
} }
/// Returns win/loss/draw counts for a user. All values are 0 when the user has no games. /// Returns win/loss/draw counts for a user. All values are 0 when the user has no games.
pub async fn get_user_stats(pool: &SqlitePool, user_id: i64) -> sqlx::Result<UserStats> { pub async fn get_user_stats(pool: &Pool, user_id: i64) -> Result<UserStats, DbError> {
sqlx::query_as::<_, UserStats>( let client = pool.get().await?;
"SELECT let row = client
COUNT(*) as total, .query_one(
COALESCE(SUM(CASE WHEN outcome = 'win' THEN 1 ELSE 0 END), 0) as wins, "SELECT
COALESCE(SUM(CASE WHEN outcome = 'loss' THEN 1 ELSE 0 END), 0) as losses, COUNT(*) as total,
COALESCE(SUM(CASE WHEN outcome = 'draw' THEN 1 ELSE 0 END), 0) as draws COALESCE(SUM(CASE WHEN outcome = 'win' THEN 1 ELSE 0 END), 0::BIGINT) as wins,
FROM game_participants COALESCE(SUM(CASE WHEN outcome = 'loss' THEN 1 ELSE 0 END), 0::BIGINT) as losses,
WHERE user_id = ?", COALESCE(SUM(CASE WHEN outcome = 'draw' THEN 1 ELSE 0 END), 0::BIGINT) as draws
) FROM game_participants
.bind(user_id) WHERE user_id = $1",
.fetch_one(pool) &[&user_id],
.await )
.await?;
Ok(UserStats {
total: row.get("total"),
wins: row.get("wins"),
losses: row.get("losses"),
draws: row.get("draws"),
})
} }
/// Returns a paginated list of games a user participated in, newest first. /// Returns a paginated list of games a user participated in, newest first.
pub async fn get_user_games( pub async fn get_user_games(
pool: &SqlitePool, pool: &Pool,
user_id: i64, user_id: i64,
page: i64, page: i64,
per_page: i64, per_page: i64,
) -> sqlx::Result<Vec<GameSummary>> { ) -> Result<Vec<GameSummary>, DbError> {
sqlx::query_as::<_, GameSummary>( let client = pool.get().await?;
"SELECT gr.id, gr.game_id, gr.room_code, gr.started_at, gr.ended_at, gr.result, gp.outcome let rows = client
FROM game_records gr .query(
JOIN game_participants gp ON gp.game_record_id = gr.id "SELECT gr.id, gr.game_id, gr.room_code, gr.started_at, gr.ended_at, gr.result, gp.outcome
WHERE gp.user_id = ? FROM game_records gr
ORDER BY gr.started_at DESC JOIN game_participants gp ON gp.game_record_id = gr.id
LIMIT ? OFFSET ?", WHERE gp.user_id = $1
) ORDER BY gr.started_at DESC
.bind(user_id) LIMIT $2 OFFSET $3",
.bind(per_page) &[&user_id, &per_page, &(page * per_page)],
.bind(page * per_page) )
.fetch_all(pool) .await?;
.await Ok(rows
.into_iter()
.map(|r| GameSummary {
id: r.get("id"),
game_id: r.get("game_id"),
room_code: r.get("room_code"),
started_at: r.get("started_at"),
ended_at: r.get("ended_at"),
result: r.get("result"),
outcome: r.get("outcome"),
})
.collect())
} }

View file

@ -43,7 +43,7 @@ pub fn router() -> Router<Arc<AppState>> {
// ── Error type ──────────────────────────────────────────────────────────────── // ── Error type ────────────────────────────────────────────────────────────────
enum AppError { enum AppError {
Database(sqlx::Error), Database(db::DbError),
NotFound, NotFound,
Conflict(&'static str), Conflict(&'static str),
BadRequest(&'static str), BadRequest(&'static str),
@ -67,16 +67,12 @@ impl IntoResponse for AppError {
} }
} }
impl From<sqlx::Error> for AppError { impl From<db::DbError> for AppError {
fn from(e: sqlx::Error) -> Self { fn from(e: db::DbError) -> Self {
AppError::Database(e) AppError::Database(e)
} }
} }
fn is_unique_violation(e: &sqlx::Error) -> bool {
matches!(e, sqlx::Error::Database(db_err) if db_err.message().contains("UNIQUE constraint failed"))
}
// ── Request / response bodies ───────────────────────────────────────────────── // ── Request / response bodies ─────────────────────────────────────────────────
#[derive(Deserialize)] #[derive(Deserialize)]
@ -173,7 +169,7 @@ async fn register(
let user_id = db::create_user(&state.db, &body.username, &body.email, &hash) let user_id = db::create_user(&state.db, &body.username, &body.email, &hash)
.await .await
.map_err(|e| { .map_err(|e| {
if is_unique_violation(&e) { if e.is_unique_violation() {
AppError::Conflict("username or email already taken") AppError::Conflict("username or email already taken")
} else { } else {
AppError::Database(e) AppError::Database(e)
@ -276,17 +272,7 @@ async fn user_games(
// ── Game detail (Phase 5) ───────────────────────────────────────────────────── // ── Game detail (Phase 5) ─────────────────────────────────────────────────────
#[derive(sqlx::FromRow, Serialize)] #[derive(Serialize)]
struct GameRecordRow {
id: i64,
game_id: String,
room_code: String,
started_at: i64,
ended_at: Option<i64>,
result: Option<String>,
}
#[derive(sqlx::FromRow, Serialize)]
struct ParticipantWithUsername { struct ParticipantWithUsername {
player_id: i64, player_id: i64,
outcome: Option<String>, outcome: Option<String>,
@ -308,33 +294,46 @@ async fn game_detail(
Path(id): Path<i64>, Path(id): Path<i64>,
State(state): State<Arc<AppState>>, State(state): State<Arc<AppState>>,
) -> Result<impl IntoResponse, AppError> { ) -> Result<impl IntoResponse, AppError> {
let record = sqlx::query_as::<_, GameRecordRow>( let client = state.db.get().await.map_err(db::DbError::from)?;
"SELECT id, game_id, room_code, started_at, ended_at, result
FROM game_records WHERE id = ?",
)
.bind(id)
.fetch_optional(&state.db)
.await?
.ok_or(AppError::NotFound)?;
let participants = sqlx::query_as::<_, ParticipantWithUsername>( let record = client
"SELECT gp.player_id, gp.outcome, u.username .query_opt(
FROM game_participants gp "SELECT id, game_id, room_code, started_at, ended_at, result
LEFT JOIN users u ON u.id = gp.user_id FROM game_records WHERE id = $1",
WHERE gp.game_record_id = ? &[&id],
ORDER BY gp.player_id", )
) .await
.bind(id) .map_err(db::DbError::from)?
.fetch_all(&state.db) .ok_or(AppError::NotFound)?;
.await?;
let rows = client
.query(
"SELECT gp.player_id, gp.outcome, u.username
FROM game_participants gp
LEFT JOIN users u ON u.id = gp.user_id
WHERE gp.game_record_id = $1
ORDER BY gp.player_id",
&[&id],
)
.await
.map_err(db::DbError::from)?;
let participants = rows
.into_iter()
.map(|r| ParticipantWithUsername {
player_id: r.get("player_id"),
outcome: r.get("outcome"),
username: r.get("username"),
})
.collect();
Ok(Json(GameDetailResponse { Ok(Json(GameDetailResponse {
id: record.id, id: record.get("id"),
game_id: record.game_id, game_id: record.get("game_id"),
room_code: record.room_code, room_code: record.get("room_code"),
started_at: record.started_at, started_at: record.get("started_at"),
ended_at: record.ended_at, ended_at: record.get("ended_at"),
result: record.result, result: record.get("result"),
participants, participants,
})) }))
} }
@ -362,7 +361,7 @@ struct GameResultResponse {
/// ///
/// The room code + game ID act as the shared secret (same trust level as WS join). /// The room code + game ID act as the shared secret (same trust level as WS join).
/// `close_game_record` is idempotent (no-op if already closed), and participant /// `close_game_record` is idempotent (no-op if already closed), and participant
/// inserts use `INSERT OR IGNORE`, so safe retries are supported. /// inserts use `ON CONFLICT DO NOTHING`, so safe retries are supported.
async fn game_result( async fn game_result(
State(state): State<Arc<AppState>>, State(state): State<Arc<AppState>>,
Json(body): Json<GameResultBody>, Json(body): Json<GameResultBody>,

View file

@ -6,7 +6,7 @@
use bytes::Bytes; use bytes::Bytes;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use sqlx::SqlitePool; use deadpool_postgres::Pool;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use tokio::fs; use tokio::fs;
@ -57,12 +57,12 @@ pub struct AppState {
pub rooms: Mutex<HashMap<String, Room>>, pub rooms: Mutex<HashMap<String, Room>>,
/// Contains a mapping from game name to the maximum amount of players allowed. /// Contains a mapping from game name to the maximum amount of players allowed.
pub configs: RwLock<HashMap<String, u16>>, pub configs: RwLock<HashMap<String, u16>>,
/// SQLite connection pool — shared across all request handlers. /// PostgreSQL connection pool — shared across all request handlers.
pub db: SqlitePool, pub db: Pool,
} }
impl AppState { impl AppState {
pub fn new(db: SqlitePool) -> Self { pub fn new(db: Pool) -> Self {
Self { Self {
rooms: Mutex::new(HashMap::new()), rooms: Mutex::new(HashMap::new()),
configs: RwLock::new(HashMap::new()), configs: RwLock::new(HashMap::new()),

View file

@ -29,7 +29,7 @@ use axum::http::{HeaderName, Method};
use tower_http::cors::{AllowOrigin, CorsLayer}; use tower_http::cors::{AllowOrigin, CorsLayer};
use tower_http::services::{ServeDir, ServeFile}; use tower_http::services::{ServeDir, ServeFile};
use tower_sessions::{Expiry, SessionManagerLayer}; use tower_sessions::{Expiry, SessionManagerLayer};
use tower_sessions_sqlx_store::SqliteStore; use tower_sessions::MemoryStore;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
#[tokio::main] #[tokio::main]
@ -51,15 +51,11 @@ async fn main() {
) )
.init(); .init();
let db_path = std::env::var("DATABASE_PATH").unwrap_or_else(|_| "data/relay.db".to_string()); let database_url = std::env::var("DATABASE_URL")
let pool = db::init_db(&db_path).await; .unwrap_or_else(|_| "postgresql://trictrac:trictrac@127.0.0.1:5432/trictrac".to_string());
let pool = db::init_db(&database_url).await;
let session_store = SqliteStore::new(pool.clone());
session_store
.migrate()
.await
.expect("Failed to initialize session store");
let session_store = MemoryStore::default();
let session_layer = SessionManagerLayer::new(session_store) let session_layer = SessionManagerLayer::new(session_store)
.with_secure(false) .with_secure(false)
.with_expiry(Expiry::OnInactivity(TimeDuration::days(30))); .with_expiry(Expiry::OnInactivity(TimeDuration::days(30)));