feat: add delivery sequence numbers + major server/client refactor
Delivery sequence numbers (MLS epoch ordering fix):
- schemas/node.capnp: add Envelope{seq,data} struct; enqueue returns seq:UInt64;
fetch/fetchWait return List(Envelope) instead of List(Data)
- storage.rs: Store trait enqueue returns u64; fetch/fetch_limited return
Vec<(u64, Vec<u8>)>; FileBackedStore gains QueueMapV3 with per-inbox seq
counters and V2→V3 on-disk migration
- migrations/002_add_seq.sql: seq column, delivery_seq_counters table, index
- sql_store.rs: atomic UPSERT counter via RETURNING, ORDER BY seq, SCHEMA_VERSION→3
- node_service/delivery.rs: builds Envelope list; returns seq from enqueue
- client/rpc.rs: enqueue→u64, fetch_all/fetch_wait→Vec<(u64,Vec<u8>)>
- client/commands.rs: sort-by-seq before MLS processing; retry loop in cmd_recv
and receive_pending_plaintexts for correct epoch ordering
Server refactor:
- Split monolithic main.rs into node_service/{mod,delivery,auth_ops,key_ops,p2p_ops}
- Add auth.rs (token validation, rate limiting), config.rs, metrics.rs, tls.rs
- Add SQL migrations runner (001_initial.sql, 002_add_seq.sql)
- OPAQUE PAKE login/registration, sealed-sender mode, queue depth limit (1000)
Client refactor:
- Split lib.rs into client/{commands,rpc,state,retry,hex,mod}
- Add cmd_whoami, cmd_health, cmd_check_key, cmd_ping subcommands
- Add cmd_register_user, cmd_login (OPAQUE), cmd_refresh_keypackage
- Hybrid PQ envelope (X25519 + ML-KEM-768) on all send/recv paths
- E2E test suite expanded
Other:
- quicnprotochat-gui: Tauri 2 desktop GUI skeleton (backend + HTML UI)
- quicnprotochat-p2p: iroh-based P2P transport stub
- quicnprotochat-core: app_message, hybrid_crypto modules; GroupMember API updates
- .github/workflows/size-lint.yml: binary size regression check
- docs: protocol comparison, roadmap updates, fully-operational checklist
This commit is contained in:
86
crates/quicnprotochat-gui/src/backend.rs
Normal file
86
crates/quicnprotochat-gui/src/backend.rs
Normal file
@@ -0,0 +1,86 @@
|
||||
//! Backend service running on a dedicated thread with a tokio LocalSet.
|
||||
//!
|
||||
//! All server-facing work (capnp-rpc, node_service::Client) is !Send and must run on this
|
||||
//! single thread. The UI thread sends commands over a channel; this thread runs
|
||||
//! `LocalSet::run_until` for each request and sends the result back.
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::sync::mpsc;
|
||||
use std::thread;
|
||||
|
||||
use tokio::runtime::Builder;
|
||||
use tokio::task::LocalSet;
|
||||
|
||||
use quicnprotochat_client::{cmd_health_json, whoami_json};
|
||||
|
||||
/// Commands the UI can send to the backend thread.
///
/// Derives `Debug` so failed requests can be logged/diagnosed; all payload
/// fields (`String`, `Option<String>`, `PathBuf`) are `Debug` themselves.
#[derive(Debug)]
pub enum BackendCommand {
    /// Read the local identity state (handled synchronously via `whoami_json`).
    Whoami {
        /// Path to the client state file.
        state_path: String,
        /// Optional password protecting the state file.
        password: Option<String>,
    },
    /// Query server health (handled asynchronously on the backend LocalSet).
    Health {
        /// Server address to connect to.
        server: String,
        /// Path to the CA certificate used to trust the server.
        ca_cert: PathBuf,
        /// TLS server name for certificate verification.
        server_name: String,
    },
}
|
||||
|
||||
/// Response sent back to the UI: a JSON string on success, or a
/// human-readable error message on failure.
pub type BackendResponse = Result<String, String>;
|
||||
|
||||
/// Spawn the backend thread and return a sender to post commands and a join handle.
|
||||
/// The backend runs a tokio LocalSet and processes one command at a time:
|
||||
/// for each received command it runs `LocalSet::run_until(future)` (for async commands)
|
||||
/// or runs sync code (whoami), then sends the result on the provided reply channel.
|
||||
pub fn spawn_backend() -> (mpsc::Sender<(BackendCommand, mpsc::Sender<BackendResponse>)>, thread::JoinHandle<()>) {
|
||||
let (tx, rx) = mpsc::channel::<(BackendCommand, mpsc::Sender<BackendResponse>)>();
|
||||
|
||||
let handle = thread::spawn(move || {
|
||||
let rt = Builder::new_current_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
.expect("backend tokio runtime");
|
||||
let local = LocalSet::new();
|
||||
|
||||
while let Ok((cmd, reply_tx)) = rx.recv() {
|
||||
let result = run_command(&local, &rt, cmd);
|
||||
let _ = reply_tx.send(result);
|
||||
}
|
||||
});
|
||||
|
||||
(tx, handle)
|
||||
}
|
||||
|
||||
fn run_command(
|
||||
local: &LocalSet,
|
||||
rt: &tokio::runtime::Runtime,
|
||||
cmd: BackendCommand,
|
||||
) -> BackendResponse {
|
||||
match cmd {
|
||||
BackendCommand::Whoami { state_path, password } => {
|
||||
let path = PathBuf::from(&state_path);
|
||||
whoami_json(&path, password.as_deref()).map_err(|e| e.to_string())
|
||||
}
|
||||
BackendCommand::Health {
|
||||
server,
|
||||
ca_cert,
|
||||
server_name,
|
||||
} => {
|
||||
// Request-response: we run LocalSet::run_until for this single request so capnp-rpc
|
||||
// and connect_node stay on this thread (!Send).
|
||||
let fut = cmd_health_json(&server, &ca_cert, &server_name);
|
||||
rt.block_on(local.run_until(fut)).map_err(|e| e.to_string())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Default CA cert path (relative to cwd or absolute); same default as CLI.
pub fn default_ca_cert() -> PathBuf {
    std::path::Path::new("data").join("server-cert.der")
}
|
||||
|
||||
/// Default TLS server name.
pub fn default_server_name() -> String {
    String::from("localhost")
}
|
||||
76
crates/quicnprotochat-gui/src/lib.rs
Normal file
76
crates/quicnprotochat-gui/src/lib.rs
Normal file
@@ -0,0 +1,76 @@
|
||||
//! quicnprotochat native GUI (Tauri 2).
|
||||
//!
|
||||
//! The backend runs on a dedicated thread with a tokio LocalSet; all server-facing
|
||||
//! work (capnp-rpc, node_service::Client) is dispatched there. Tauri commands
|
||||
//! block on the request-response channel until the backend returns.
|
||||
|
||||
mod backend;
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::sync::mpsc;
|
||||
|
||||
use backend::{spawn_backend, BackendCommand};
|
||||
|
||||
/// Shared state: sender to the backend thread.
///
/// Registered with tauri via `Builder::manage` so command handlers can reach
/// the backend over the channel.
struct BackendState {
    // Each message pairs a command with the per-request reply sender that the
    // backend answers on.
    tx: mpsc::Sender<(BackendCommand, mpsc::Sender<backend::BackendResponse>)>,
}
|
||||
|
||||
/// Runs whoami on the backend thread and returns JSON string (identity_key, fingerprint, etc.).
|
||||
#[tauri::command]
|
||||
fn whoami(
|
||||
state: tauri::State<BackendState>,
|
||||
state_path: String,
|
||||
password: Option<String>,
|
||||
) -> Result<String, String> {
|
||||
let (reply_tx, reply_rx) = mpsc::channel();
|
||||
state
|
||||
.tx
|
||||
.send((
|
||||
BackendCommand::Whoami {
|
||||
state_path,
|
||||
password,
|
||||
},
|
||||
reply_tx,
|
||||
))
|
||||
.map_err(|e| e.to_string())?;
|
||||
reply_rx.recv().map_err(|e| e.to_string())?
|
||||
}
|
||||
|
||||
/// Runs health check on the backend thread (LocalSet::run_until) and returns status JSON.
|
||||
#[tauri::command]
|
||||
fn health(
|
||||
state: tauri::State<BackendState>,
|
||||
server: String,
|
||||
ca_cert: Option<String>,
|
||||
server_name: Option<String>,
|
||||
) -> Result<String, String> {
|
||||
let ca_cert = ca_cert
|
||||
.map(PathBuf::from)
|
||||
.unwrap_or_else(backend::default_ca_cert);
|
||||
let server_name = server_name.unwrap_or_else(backend::default_server_name);
|
||||
let (reply_tx, reply_rx) = mpsc::channel();
|
||||
state
|
||||
.tx
|
||||
.send((
|
||||
BackendCommand::Health {
|
||||
server,
|
||||
ca_cert,
|
||||
server_name,
|
||||
},
|
||||
reply_tx,
|
||||
))
|
||||
.map_err(|e| e.to_string())?;
|
||||
reply_rx.recv().map_err(|e| e.to_string())?
|
||||
}
|
||||
|
||||
#[cfg_attr(mobile, tauri::mobile_entry_point)]
|
||||
pub fn run() {
|
||||
let (backend_tx, _backend_handle) = spawn_backend();
|
||||
|
||||
tauri::Builder::default()
|
||||
.manage(BackendState { tx: backend_tx })
|
||||
.invoke_handler(tauri::generate_handler![whoami, health])
|
||||
.run(tauri::generate_context!())
|
||||
.expect("error while running tauri application");
|
||||
}
|
||||
5
crates/quicnprotochat-gui/src/main.rs
Normal file
5
crates/quicnprotochat-gui/src/main.rs
Normal file
@@ -0,0 +1,5 @@
|
||||
//! Desktop entry point for quicnprotochat-gui.

// Delegates to the library crate's `run`, which spawns the backend thread and
// starts the tauri application (the same entry point also serves mobile via
// `tauri::mobile_entry_point`).
fn main() {
    quicnprotochat_gui::run()
}
|
||||
Reference in New Issue
Block a user