refactor: migrate sqlx + sqlite to tokio-postgres

This commit is contained in:
Henri Bourcereau 2026-04-22 21:36:56 +02:00
parent 4f5e21becb
commit 03b614c62e
12 changed files with 838 additions and 250 deletions

607
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -13,6 +13,16 @@ members = [
"spiel_bot",
]
default-members = [
"store",
"clients/cli",
"clients/backbone-lib",
"server/protocol",
"server/relay-server",
"bot",
"spiel_bot",
]
# For the server we will need opt-level='3'
[profile.release]
opt-level = 'z' # Minimum space

View file

@ -3,10 +3,10 @@
"devenv": {
"locked": {
"dir": "src/modules",
"lastModified": 1770390537,
"lastModified": 1776863933,
"owner": "cachix",
"repo": "devenv",
"rev": "d6f45cc00829254a9a6f8807c8fbfaf3efa7e629",
"rev": "863b4204725efaeeb73811e376f928232b720646",
"type": "github"
},
"original": {
@ -40,10 +40,10 @@
]
},
"locked": {
"lastModified": 1769939035,
"lastModified": 1776796298,
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "a8ca480175326551d6c4121498316261cbb5b260",
"rev": "3cfd774b0a530725a077e17354fbdb87ea1c4aad",
"type": "github"
},
"original": {
@ -74,10 +74,10 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1770136044,
"lastModified": 1776734388,
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "e576e3c9cf9bad747afcddd9e34f51d18c855b4e",
"rev": "10e7ad5bbcb421fe07e3a4ad53a634b0cd57ffac",
"type": "github"
},
"original": {

View file

@ -8,7 +8,9 @@ in
# for Leptos
pkgs.trunk
pkgs.lld
# pkgs.wasm-bindgen-cli_0_2_114
# for backbone-lib
pkgs.wasm-bindgen-cli_0_2_114
pkgs.binaryen # for wasm-opt
# pour burn-rs
@ -25,6 +27,13 @@ in
];
services.postgres = {
enable = true;
listen_addresses = "*";
# port = 5432;
initialDatabases = [{ name = "trictrac"; user = "trictrac"; pass = "trictrac"; }];
};
# https://devenv.sh/languages/
languages.rust.enable = true;

View file

@ -4,7 +4,7 @@ version = "0.1.0"
edition = "2024"
[dependencies]
tokio = {version = "1.48.0", features = ["full"]}
tokio = { version = "1.48.0", features = ["full"] }
axum = { version = "0.8.7", features = ["ws"] }
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
serde = { version = "1.0.228", features = ["derive"] }
@ -14,16 +14,14 @@ postcard = "1.1.3"
bytes = "1.11.0"
tracing = "0.1.41"
tower-http = { version = "0.6.7", features = ["fs", "cors"] }
protocol = {path = "../protocol"}
protocol = { path = "../protocol" }
rand = "0.8"
# User management / auth
sqlx = { version = "0.8", features = ["sqlite", "runtime-tokio", "migrate"] }
tokio-postgres = "0.7"
deadpool-postgres = { version = "0.14", features = ["rt_tokio_1"] }
tower-sessions = "0.14"
tower-sessions-sqlx-store = { version = "0.15", features = ["sqlite"] }
axum-login = "0.18"
argon2 = "0.5"
time = "0.3"
thiserror = "1"

View file

@ -1,10 +1,6 @@
[
{
"name" : "tic-tac-toe",
"max_players" : 10
},
{
"name" : "Ternio",
"max_players" : 3
"name": "trictrac",
"max_players": 10
}
]
]

View file

@ -1,24 +1,24 @@
CREATE TABLE IF NOT EXISTS users (
id INTEGER PRIMARY KEY AUTOINCREMENT,
username TEXT NOT NULL UNIQUE,
email TEXT NOT NULL UNIQUE,
password_hash TEXT NOT NULL,
created_at INTEGER NOT NULL
id BIGSERIAL PRIMARY KEY,
username TEXT NOT NULL UNIQUE,
email TEXT NOT NULL UNIQUE,
password_hash TEXT NOT NULL,
created_at BIGINT NOT NULL
);
CREATE TABLE IF NOT EXISTS game_records (
id INTEGER PRIMARY KEY AUTOINCREMENT,
game_id TEXT NOT NULL,
room_code TEXT NOT NULL,
started_at INTEGER NOT NULL,
ended_at INTEGER,
id BIGSERIAL PRIMARY KEY,
game_id TEXT NOT NULL,
room_code TEXT NOT NULL,
started_at BIGINT NOT NULL,
ended_at BIGINT,
result TEXT
);
CREATE TABLE IF NOT EXISTS game_participants (
id INTEGER PRIMARY KEY AUTOINCREMENT,
game_record_id INTEGER NOT NULL REFERENCES game_records(id),
user_id INTEGER REFERENCES users(id),
player_id INTEGER NOT NULL,
id BIGSERIAL PRIMARY KEY,
game_record_id BIGINT NOT NULL REFERENCES game_records(id),
user_id BIGINT REFERENCES users(id),
player_id BIGINT NOT NULL,
outcome TEXT
);

View file

@ -7,7 +7,7 @@ use argon2::password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, Salt
use argon2::password_hash::rand_core::OsRng;
use argon2::Argon2;
use axum_login::{AuthUser, AuthnBackend, UserId};
use sqlx::SqlitePool;
use deadpool_postgres::Pool;
use crate::db;
@ -39,7 +39,7 @@ pub struct Credentials {
#[derive(Debug, thiserror::Error)]
pub enum AuthError {
#[error("database error: {0}")]
Database(#[from] sqlx::Error),
Database(#[from] db::DbError),
#[error("password hashing error")]
PasswordHash,
}
@ -48,11 +48,11 @@ pub enum AuthError {
#[derive(Clone)]
pub struct AuthBackend {
pool: SqlitePool,
pool: Pool,
}
impl AuthBackend {
pub fn new(pool: SqlitePool) -> Self {
pub fn new(pool: Pool) -> Self {
Self { pool }
}
}

View file

@ -1,14 +1,14 @@
//! Database access layer.
//!
//! All SQLite interaction is funnelled through this module. Functions return
//! `sqlx::Result` so callers can handle errors uniformly.
//! All PostgreSQL interaction is funnelled through this module. Functions return
//! `Result<_, DbError>` so callers can handle errors uniformly.
use sqlx::sqlite::SqliteConnectOptions;
use sqlx::{SqlitePool, pool::PoolOptions};
use deadpool_postgres::{Manager, ManagerConfig, Pool, RecyclingMethod};
use tokio_postgres::{NoTls, error::SqlState};
use std::time::{SystemTime, UNIX_EPOCH};
/// A registered user as stored in the database.
#[derive(Clone, Debug, sqlx::FromRow)]
#[derive(Clone, Debug)]
pub struct User {
pub id: i64,
pub username: String,
@ -18,7 +18,6 @@ pub struct User {
}
/// Aggregated game statistics for a user's public profile.
#[derive(sqlx::FromRow)]
pub struct UserStats {
pub total: i64,
pub wins: i64,
@ -27,7 +26,6 @@ pub struct UserStats {
}
/// A condensed game entry returned by [`get_user_games`].
#[derive(sqlx::FromRow)]
pub struct GameSummary {
pub id: i64,
pub game_id: String,
@ -38,6 +36,24 @@ pub struct GameSummary {
pub outcome: Option<String>,
}
#[derive(Debug, thiserror::Error)]
pub enum DbError {
#[error("connection pool error: {0}")]
Pool(#[from] deadpool_postgres::PoolError),
#[error("database error: {0}")]
Db(#[from] tokio_postgres::Error),
}
impl DbError {
pub fn is_unique_violation(&self) -> bool {
if let DbError::Db(e) = self {
e.code() == Some(&SqlState::UNIQUE_VIOLATION)
} else {
false
}
}
}
fn now_unix() -> i64 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
@ -45,34 +61,28 @@ fn now_unix() -> i64 {
.as_secs() as i64
}
/// Opens (or creates) the SQLite database at `path` and runs all pending migrations.
pub async fn init_db(path: &str) -> SqlitePool {
if let Some(parent) = std::path::Path::new(path).parent() {
if !parent.as_os_str().is_empty() {
tokio::fs::create_dir_all(parent)
.await
.expect("Failed to create database directory");
}
}
/// Connects to the PostgreSQL database at `url` and runs all pending migrations.
pub async fn init_db(url: &str) -> Pool {
let pg_config: tokio_postgres::Config = url.parse().expect("Invalid DATABASE_URL");
let manager = Manager::from_config(
pg_config,
NoTls,
ManagerConfig { recycling_method: RecyclingMethod::Fast },
);
let pool = Pool::builder(manager)
.max_size(5)
.build()
.expect("Failed to build connection pool");
let pool = PoolOptions::<sqlx::Sqlite>::new()
.max_connections(5)
.connect_with(
SqliteConnectOptions::new()
.filename(path)
.create_if_missing(true),
)
let client = pool.get().await.expect("Failed to get connection for migrations");
client
.batch_execute(include_str!("../migrations/001_init.sql"))
.await
.expect("Failed to open SQLite database");
sqlx::migrate::Migrator::new(
std::path::Path::new(concat!(env!("CARGO_MANIFEST_DIR"), "/migrations")),
)
.await
.expect("Failed to locate migrations directory")
.run(&pool)
.await
.expect("Failed to run database migrations");
.expect("Migration 001 failed");
client
.batch_execute(include_str!("../migrations/002_participants_unique.sql"))
.await
.expect("Migration 002 failed");
pool
}
@ -80,135 +90,164 @@ pub async fn init_db(path: &str) -> SqlitePool {
// ── Users ────────────────────────────────────────────────────────────────────
pub async fn create_user(
pool: &SqlitePool,
pool: &Pool,
username: &str,
email: &str,
password_hash: &str,
) -> sqlx::Result<i64> {
let id = sqlx::query(
"INSERT INTO users (username, email, password_hash, created_at) VALUES (?, ?, ?, ?)",
)
.bind(username)
.bind(email)
.bind(password_hash)
.bind(now_unix())
.execute(pool)
.await?
.last_insert_rowid();
Ok(id)
) -> Result<i64, DbError> {
let client = pool.get().await?;
let row = client
.query_one(
"INSERT INTO users (username, email, password_hash, created_at) \
VALUES ($1, $2, $3, $4) RETURNING id",
&[&username, &email, &password_hash, &now_unix()],
)
.await?;
Ok(row.get(0))
}
pub async fn get_user_by_id(pool: &SqlitePool, id: i64) -> sqlx::Result<Option<User>> {
sqlx::query_as::<_, User>(
"SELECT id, username, email, password_hash, created_at FROM users WHERE id = ?",
)
.bind(id)
.fetch_optional(pool)
.await
pub async fn get_user_by_id(pool: &Pool, id: i64) -> Result<Option<User>, DbError> {
let client = pool.get().await?;
let row = client
.query_opt(
"SELECT id, username, email, password_hash, created_at FROM users WHERE id = $1",
&[&id],
)
.await?;
Ok(row.map(|r| User {
id: r.get("id"),
username: r.get("username"),
email: r.get("email"),
password_hash: r.get("password_hash"),
created_at: r.get("created_at"),
}))
}
pub async fn get_user_by_username(pool: &SqlitePool, username: &str) -> sqlx::Result<Option<User>> {
sqlx::query_as::<_, User>(
"SELECT id, username, email, password_hash, created_at FROM users WHERE username = ?",
)
.bind(username)
.fetch_optional(pool)
.await
pub async fn get_user_by_username(pool: &Pool, username: &str) -> Result<Option<User>, DbError> {
let client = pool.get().await?;
let row = client
.query_opt(
"SELECT id, username, email, password_hash, created_at FROM users WHERE username = $1",
&[&username],
)
.await?;
Ok(row.map(|r| User {
id: r.get("id"),
username: r.get("username"),
email: r.get("email"),
password_hash: r.get("password_hash"),
created_at: r.get("created_at"),
}))
}
// ── Game records ─────────────────────────────────────────────────────────────
/// Creates a new game record when a room opens. Returns the record id.
pub async fn insert_game_record(
pool: &SqlitePool,
pool: &Pool,
game_id: &str,
room_code: &str,
) -> sqlx::Result<i64> {
let id = sqlx::query(
"INSERT INTO game_records (game_id, room_code, started_at) VALUES (?, ?, ?)",
)
.bind(game_id)
.bind(room_code)
.bind(now_unix())
.execute(pool)
.await?
.last_insert_rowid();
Ok(id)
) -> Result<i64, DbError> {
let client = pool.get().await?;
let row = client
.query_one(
"INSERT INTO game_records (game_id, room_code, started_at) \
VALUES ($1, $2, $3) RETURNING id",
&[&game_id, &room_code, &now_unix()],
)
.await?;
Ok(row.get(0))
}
/// Stamps `ended_at` and stores the opaque result JSON supplied by the game.
pub async fn close_game_record(
pool: &SqlitePool,
pool: &Pool,
record_id: i64,
result_json: Option<&str>,
) -> sqlx::Result<()> {
) -> Result<(), DbError> {
// AND ended_at IS NULL prevents overwriting a result already set by POST /games/result
sqlx::query(
"UPDATE game_records SET ended_at = ?, result = ? WHERE id = ? AND ended_at IS NULL",
)
.bind(now_unix())
.bind(result_json)
.bind(record_id)
.execute(pool)
.await?;
let client = pool.get().await?;
client
.execute(
"UPDATE game_records SET ended_at = $1, result = $2 \
WHERE id = $3 AND ended_at IS NULL",
&[&now_unix(), &result_json, &record_id],
)
.await?;
Ok(())
}
/// Records a player's participation in a game. `user_id` is `None` for anonymous players.
pub async fn insert_participant(
pool: &SqlitePool,
pool: &Pool,
record_id: i64,
user_id: Option<i64>,
player_id: u16,
outcome: Option<&str>,
) -> sqlx::Result<()> {
sqlx::query(
"INSERT OR IGNORE INTO game_participants (game_record_id, user_id, player_id, outcome)
VALUES (?, ?, ?, ?)",
)
.bind(record_id)
.bind(user_id)
.bind(player_id as i64)
.bind(outcome)
.execute(pool)
.await?;
) -> Result<(), DbError> {
let client = pool.get().await?;
client
.execute(
"INSERT INTO game_participants (game_record_id, user_id, player_id, outcome) \
VALUES ($1, $2, $3, $4) ON CONFLICT DO NOTHING",
&[&record_id, &user_id, &(player_id as i64), &outcome],
)
.await?;
Ok(())
}
/// Returns win/loss/draw counts for a user. All values are 0 when the user has no games.
pub async fn get_user_stats(pool: &SqlitePool, user_id: i64) -> sqlx::Result<UserStats> {
sqlx::query_as::<_, UserStats>(
"SELECT
COUNT(*) as total,
COALESCE(SUM(CASE WHEN outcome = 'win' THEN 1 ELSE 0 END), 0) as wins,
COALESCE(SUM(CASE WHEN outcome = 'loss' THEN 1 ELSE 0 END), 0) as losses,
COALESCE(SUM(CASE WHEN outcome = 'draw' THEN 1 ELSE 0 END), 0) as draws
FROM game_participants
WHERE user_id = ?",
)
.bind(user_id)
.fetch_one(pool)
.await
pub async fn get_user_stats(pool: &Pool, user_id: i64) -> Result<UserStats, DbError> {
let client = pool.get().await?;
let row = client
.query_one(
"SELECT
COUNT(*) as total,
COALESCE(SUM(CASE WHEN outcome = 'win' THEN 1 ELSE 0 END), 0::BIGINT) as wins,
COALESCE(SUM(CASE WHEN outcome = 'loss' THEN 1 ELSE 0 END), 0::BIGINT) as losses,
COALESCE(SUM(CASE WHEN outcome = 'draw' THEN 1 ELSE 0 END), 0::BIGINT) as draws
FROM game_participants
WHERE user_id = $1",
&[&user_id],
)
.await?;
Ok(UserStats {
total: row.get("total"),
wins: row.get("wins"),
losses: row.get("losses"),
draws: row.get("draws"),
})
}
/// Returns a paginated list of games a user participated in, newest first.
pub async fn get_user_games(
pool: &SqlitePool,
pool: &Pool,
user_id: i64,
page: i64,
per_page: i64,
) -> sqlx::Result<Vec<GameSummary>> {
sqlx::query_as::<_, GameSummary>(
"SELECT gr.id, gr.game_id, gr.room_code, gr.started_at, gr.ended_at, gr.result, gp.outcome
FROM game_records gr
JOIN game_participants gp ON gp.game_record_id = gr.id
WHERE gp.user_id = ?
ORDER BY gr.started_at DESC
LIMIT ? OFFSET ?",
)
.bind(user_id)
.bind(per_page)
.bind(page * per_page)
.fetch_all(pool)
.await
) -> Result<Vec<GameSummary>, DbError> {
let client = pool.get().await?;
let rows = client
.query(
"SELECT gr.id, gr.game_id, gr.room_code, gr.started_at, gr.ended_at, gr.result, gp.outcome
FROM game_records gr
JOIN game_participants gp ON gp.game_record_id = gr.id
WHERE gp.user_id = $1
ORDER BY gr.started_at DESC
LIMIT $2 OFFSET $3",
&[&user_id, &per_page, &(page * per_page)],
)
.await?;
Ok(rows
.into_iter()
.map(|r| GameSummary {
id: r.get("id"),
game_id: r.get("game_id"),
room_code: r.get("room_code"),
started_at: r.get("started_at"),
ended_at: r.get("ended_at"),
result: r.get("result"),
outcome: r.get("outcome"),
})
.collect())
}

View file

@ -43,7 +43,7 @@ pub fn router() -> Router<Arc<AppState>> {
// ── Error type ────────────────────────────────────────────────────────────────
enum AppError {
Database(sqlx::Error),
Database(db::DbError),
NotFound,
Conflict(&'static str),
BadRequest(&'static str),
@ -67,16 +67,12 @@ impl IntoResponse for AppError {
}
}
impl From<sqlx::Error> for AppError {
fn from(e: sqlx::Error) -> Self {
impl From<db::DbError> for AppError {
fn from(e: db::DbError) -> Self {
AppError::Database(e)
}
}
fn is_unique_violation(e: &sqlx::Error) -> bool {
matches!(e, sqlx::Error::Database(db_err) if db_err.message().contains("UNIQUE constraint failed"))
}
// ── Request / response bodies ─────────────────────────────────────────────────
#[derive(Deserialize)]
@ -173,7 +169,7 @@ async fn register(
let user_id = db::create_user(&state.db, &body.username, &body.email, &hash)
.await
.map_err(|e| {
if is_unique_violation(&e) {
if e.is_unique_violation() {
AppError::Conflict("username or email already taken")
} else {
AppError::Database(e)
@ -276,17 +272,7 @@ async fn user_games(
// ── Game detail (Phase 5) ─────────────────────────────────────────────────────
#[derive(sqlx::FromRow, Serialize)]
struct GameRecordRow {
id: i64,
game_id: String,
room_code: String,
started_at: i64,
ended_at: Option<i64>,
result: Option<String>,
}
#[derive(sqlx::FromRow, Serialize)]
#[derive(Serialize)]
struct ParticipantWithUsername {
player_id: i64,
outcome: Option<String>,
@ -308,33 +294,46 @@ async fn game_detail(
Path(id): Path<i64>,
State(state): State<Arc<AppState>>,
) -> Result<impl IntoResponse, AppError> {
let record = sqlx::query_as::<_, GameRecordRow>(
"SELECT id, game_id, room_code, started_at, ended_at, result
FROM game_records WHERE id = ?",
)
.bind(id)
.fetch_optional(&state.db)
.await?
.ok_or(AppError::NotFound)?;
let client = state.db.get().await.map_err(db::DbError::from)?;
let participants = sqlx::query_as::<_, ParticipantWithUsername>(
"SELECT gp.player_id, gp.outcome, u.username
FROM game_participants gp
LEFT JOIN users u ON u.id = gp.user_id
WHERE gp.game_record_id = ?
ORDER BY gp.player_id",
)
.bind(id)
.fetch_all(&state.db)
.await?;
let record = client
.query_opt(
"SELECT id, game_id, room_code, started_at, ended_at, result
FROM game_records WHERE id = $1",
&[&id],
)
.await
.map_err(db::DbError::from)?
.ok_or(AppError::NotFound)?;
let rows = client
.query(
"SELECT gp.player_id, gp.outcome, u.username
FROM game_participants gp
LEFT JOIN users u ON u.id = gp.user_id
WHERE gp.game_record_id = $1
ORDER BY gp.player_id",
&[&id],
)
.await
.map_err(db::DbError::from)?;
let participants = rows
.into_iter()
.map(|r| ParticipantWithUsername {
player_id: r.get("player_id"),
outcome: r.get("outcome"),
username: r.get("username"),
})
.collect();
Ok(Json(GameDetailResponse {
id: record.id,
game_id: record.game_id,
room_code: record.room_code,
started_at: record.started_at,
ended_at: record.ended_at,
result: record.result,
id: record.get("id"),
game_id: record.get("game_id"),
room_code: record.get("room_code"),
started_at: record.get("started_at"),
ended_at: record.get("ended_at"),
result: record.get("result"),
participants,
}))
}
@ -362,7 +361,7 @@ struct GameResultResponse {
///
/// The room code + game ID act as the shared secret (same trust level as WS join).
/// `close_game_record` is idempotent (no-op if already closed), and participant
/// inserts use `INSERT OR IGNORE`, so safe retries are supported.
/// inserts use `ON CONFLICT DO NOTHING`, so safe retries are supported.
async fn game_result(
State(state): State<Arc<AppState>>,
Json(body): Json<GameResultBody>,

View file

@ -6,7 +6,7 @@
use bytes::Bytes;
use serde::{Deserialize, Serialize};
use sqlx::SqlitePool;
use deadpool_postgres::Pool;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::fs;
@ -57,12 +57,12 @@ pub struct AppState {
pub rooms: Mutex<HashMap<String, Room>>,
/// Contains a mapping from game name to the maximum amount of players allowed.
pub configs: RwLock<HashMap<String, u16>>,
/// SQLite connection pool — shared across all request handlers.
pub db: SqlitePool,
/// PostgreSQL connection pool — shared across all request handlers.
pub db: Pool,
}
impl AppState {
pub fn new(db: SqlitePool) -> Self {
pub fn new(db: Pool) -> Self {
Self {
rooms: Mutex::new(HashMap::new()),
configs: RwLock::new(HashMap::new()),

View file

@ -29,7 +29,7 @@ use axum::http::{HeaderName, Method};
use tower_http::cors::{AllowOrigin, CorsLayer};
use tower_http::services::{ServeDir, ServeFile};
use tower_sessions::{Expiry, SessionManagerLayer};
use tower_sessions_sqlx_store::SqliteStore;
use tower_sessions::MemoryStore;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
#[tokio::main]
@ -51,15 +51,11 @@ async fn main() {
)
.init();
let db_path = std::env::var("DATABASE_PATH").unwrap_or_else(|_| "data/relay.db".to_string());
let pool = db::init_db(&db_path).await;
let session_store = SqliteStore::new(pool.clone());
session_store
.migrate()
.await
.expect("Failed to initialize session store");
let database_url = std::env::var("DATABASE_URL")
.unwrap_or_else(|_| "postgresql://trictrac:trictrac@127.0.0.1:5432/trictrac".to_string());
let pool = db::init_db(&database_url).await;
let session_store = MemoryStore::default();
let session_layer = SessionManagerLayer::new(session_store)
.with_secure(false)
.with_expiry(Expiry::OnInactivity(TimeDuration::days(30)));