feat: add protocol comparison docs, P2P crate, production audit, and design fixes
Add comprehensive documentation comparing quicnprotochat against classical chat protocols (IRC+SSL, XMPP, Telegram) with diagrams and attack scenarios. Promote comparison pages to top-level sidebar section. Include P2P transport crate (iroh), production readiness audit, CI workflows, dependency policy, and continued architecture improvements across all crates. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
15
.github/CODEOWNERS
vendored
Normal file
15
.github/CODEOWNERS
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
# Code owners for quicnprotochat. PRs require review from owners.
|
||||
# See https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
|
||||
# Replace 'maintainers' with your GitHub user/team handle.
|
||||
|
||||
# Default owners for everything in the repo
|
||||
* @maintainers
|
||||
|
||||
# Crate-specific (uncomment and add handles when you have designated owners)
|
||||
# /crates/quicnprotochat-core/ @owner1
|
||||
# /crates/quicnprotochat-proto/ @owner1
|
||||
# /crates/quicnprotochat-server/ @owner1
|
||||
# /crates/quicnprotochat-client/ @owner1
|
||||
# /crates/quicnprotochat-p2p/ @owner1
|
||||
# /schemas/ @owner1
|
||||
# /docs/ @owner1
|
||||
74
.github/workflows/ci.yml
vendored
Normal file
74
.github/workflows/ci.yml
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main, master]
|
||||
pull_request:
|
||||
branches: [main, master]
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: Test
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-action@stable
|
||||
with:
|
||||
components: clippy, rustfmt
|
||||
|
||||
- name: Install capnp
|
||||
run: sudo apt-get update && sudo apt-get install -y capnproto
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Check format
|
||||
run: cargo fmt --all -- --check
|
||||
|
||||
- name: Build
|
||||
run: cargo build --workspace
|
||||
|
||||
- name: Test
|
||||
run: cargo test --workspace
|
||||
|
||||
- name: Clippy
|
||||
run: cargo clippy --workspace --all-targets --
|
||||
|
||||
deny:
|
||||
name: cargo-deny
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install cargo-deny
|
||||
run: cargo install cargo-deny --locked
|
||||
|
||||
- name: Check deny
|
||||
run: cargo deny check
|
||||
|
||||
audit:
|
||||
name: cargo-audit
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-action@stable
|
||||
|
||||
- name: Run audit
|
||||
run: |
|
||||
cargo install cargo-audit --locked
|
||||
cargo audit
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -3,3 +3,8 @@
|
||||
.vscode/
|
||||
gitea-mcp.json
|
||||
docs/book/
|
||||
|
||||
# Server/client runtime data — do not commit certs, keys, or DBs
|
||||
data/
|
||||
*.der
|
||||
quicnprotochat-server.toml
|
||||
|
||||
@@ -46,7 +46,7 @@ pub struct ClientAuth {
|
||||
|
||||
impl ClientAuth {
|
||||
/// Build a client auth context from optional token and device id.
|
||||
/// Requires a non-empty token; we run version=1 only (no legacy mode).
|
||||
/// Requires a non-empty token (auth version 1).
|
||||
pub fn from_parts(access_token: String, device_id: Option<String>) -> Self {
|
||||
let token = access_token.into_bytes();
|
||||
let device = device_id.unwrap_or_default().into_bytes();
|
||||
@@ -102,9 +102,8 @@ pub async fn cmd_register_user(
|
||||
let node_client = connect_node(server, ca_cert, server_name).await?;
|
||||
|
||||
// OPAQUE registration step 1: client -> server.
|
||||
let reg_start =
|
||||
ClientRegistration::<OpaqueSuite>::start(&mut rng, password.as_bytes())
|
||||
.map_err(|e| anyhow::anyhow!("OPAQUE register start: {e}"))?;
|
||||
let reg_start = ClientRegistration::<OpaqueSuite>::start(&mut rng, password.as_bytes())
|
||||
.map_err(|e| anyhow::anyhow!("OPAQUE register start: {e}"))?;
|
||||
|
||||
let mut req = node_client.opaque_register_start_request();
|
||||
{
|
||||
@@ -178,9 +177,8 @@ pub async fn cmd_login(
|
||||
let node_client = connect_node(server, ca_cert, server_name).await?;
|
||||
|
||||
// OPAQUE login step 1: client -> server.
|
||||
let login_start =
|
||||
ClientLogin::<OpaqueSuite>::start(&mut rng, password.as_bytes())
|
||||
.map_err(|e| anyhow::anyhow!("OPAQUE login start: {e}"))?;
|
||||
let login_start = ClientLogin::<OpaqueSuite>::start(&mut rng, password.as_bytes())
|
||||
.map_err(|e| anyhow::anyhow!("OPAQUE login start: {e}"))?;
|
||||
|
||||
let mut req = node_client.opaque_login_start_request();
|
||||
{
|
||||
@@ -234,7 +232,10 @@ pub async fn cmd_login(
|
||||
.context("login_finish: missing session_token")?
|
||||
.to_vec();
|
||||
|
||||
anyhow::ensure!(!session_token.is_empty(), "server returned empty session token");
|
||||
anyhow::ensure!(
|
||||
!session_token.is_empty(),
|
||||
"server returned empty session token"
|
||||
);
|
||||
|
||||
println!("login successful for '{username}'");
|
||||
println!("session_token: {}", hex::encode(&session_token));
|
||||
@@ -259,7 +260,7 @@ pub async fn cmd_register(server: &str, ca_cert: &Path, server_name: &str) -> an
|
||||
p.set_identity_key(&identity.public_key_bytes());
|
||||
p.set_package(&tls_bytes);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth);
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let response = req
|
||||
@@ -316,7 +317,7 @@ pub async fn cmd_register_state(
|
||||
p.set_identity_key(&member.identity().public_key_bytes());
|
||||
p.set_package(&tls_bytes);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth);
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let response = req
|
||||
@@ -381,7 +382,7 @@ pub async fn cmd_fetch_key(
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(&identity_key);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth);
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let response = req
|
||||
@@ -487,8 +488,8 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
|
||||
.context("Welcome was not delivered to Bob via DS")?;
|
||||
|
||||
// Bob unwraps the hybrid envelope and joins the group.
|
||||
let welcome_bytes = hybrid_decrypt(&bob_hybrid, &raw_welcome)
|
||||
.context("Bob: hybrid decrypt welcome failed")?;
|
||||
let welcome_bytes =
|
||||
hybrid_decrypt(&bob_hybrid, &raw_welcome).context("Bob: hybrid decrypt welcome failed")?;
|
||||
bob.join_group(&welcome_bytes)
|
||||
.context("Bob join_group failed")?;
|
||||
|
||||
@@ -496,8 +497,7 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
|
||||
let ct_ab = alice
|
||||
.send_message(b"hello bob")
|
||||
.context("Alice send_message failed")?;
|
||||
let wrapped_ab =
|
||||
hybrid_encrypt(&bob_hybrid_pk, &ct_ab).context("hybrid encrypt Alice->Bob")?;
|
||||
let wrapped_ab = hybrid_encrypt(&bob_hybrid_pk, &ct_ab).context("hybrid encrypt Alice->Bob")?;
|
||||
enqueue(&alice_ds, &bob_id.public_key_bytes(), &wrapped_ab).await?;
|
||||
|
||||
let bob_msgs = fetch_all(&bob_ds, &bob_id.public_key_bytes()).await?;
|
||||
@@ -528,8 +528,7 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
|
||||
let raw_ba = alice_msgs
|
||||
.first()
|
||||
.context("Alice: missing Bob ciphertext from DS")?;
|
||||
let inner_ba =
|
||||
hybrid_decrypt(&alice_hybrid, raw_ba).context("Alice: hybrid decrypt failed")?;
|
||||
let inner_ba = hybrid_decrypt(&alice_hybrid, raw_ba).context("Alice: hybrid decrypt failed")?;
|
||||
let ba_plaintext = alice
|
||||
.receive_message(&inner_ba)?
|
||||
.context("Alice expected application message from Bob")?;
|
||||
@@ -632,7 +631,11 @@ pub async fn cmd_invite(
|
||||
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
|
||||
println!(
|
||||
"invited peer (welcome queued{}, commit sent to {} existing member(s))",
|
||||
if peer_hybrid_pk.is_some() { ", hybrid-encrypted" } else { "" },
|
||||
if peer_hybrid_pk.is_some() {
|
||||
", hybrid-encrypted"
|
||||
} else {
|
||||
""
|
||||
},
|
||||
existing_members.len(),
|
||||
);
|
||||
Ok(())
|
||||
@@ -663,8 +666,8 @@ pub async fn cmd_join(
|
||||
.cloned()
|
||||
.context("no Welcome found in DS for this identity")?;
|
||||
|
||||
// Try hybrid decryption first, fall back to raw MLS welcome.
|
||||
let welcome_bytes = try_hybrid_unwrap(hybrid_kp.as_ref(), &raw_welcome);
|
||||
let welcome_bytes = try_hybrid_decrypt(hybrid_kp.as_ref(), &raw_welcome)
|
||||
.context("decrypt Welcome (hybrid required)")?;
|
||||
|
||||
member
|
||||
.join_group(&welcome_bytes)
|
||||
@@ -711,7 +714,11 @@ pub async fn cmd_send(
|
||||
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
|
||||
println!(
|
||||
"message sent{}",
|
||||
if peer_hybrid_pk.is_some() { " (hybrid-encrypted)" } else { "" }
|
||||
if peer_hybrid_pk.is_some() {
|
||||
" (hybrid-encrypted)"
|
||||
} else {
|
||||
""
|
||||
}
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
@@ -745,8 +752,13 @@ pub async fn cmd_recv(
|
||||
}
|
||||
|
||||
for (idx, payload) in payloads.iter().enumerate() {
|
||||
// Try hybrid decryption, fall back to raw MLS payload.
|
||||
let mls_payload = try_hybrid_unwrap(hybrid_kp.as_ref(), payload);
|
||||
let mls_payload = match try_hybrid_decrypt(hybrid_kp.as_ref(), payload) {
|
||||
Ok(b) => b,
|
||||
Err(e) => {
|
||||
println!("[{idx}] decrypt error: {e}");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
match member.receive_message(&mls_payload) {
|
||||
Ok(Some(pt)) => println!("[{idx}] plaintext: {}", String::from_utf8_lossy(&pt)),
|
||||
@@ -791,7 +803,8 @@ pub async fn connect_node(
|
||||
let crypto = QuicClientConfig::try_from(tls)
|
||||
.map_err(|e| anyhow::anyhow!("invalid client TLS config: {e}"))?;
|
||||
|
||||
let mut endpoint = Endpoint::client("0.0.0.0:0".parse().unwrap())?;
|
||||
let bind_addr: SocketAddr = "0.0.0.0:0".parse().context("parse client bind address")?;
|
||||
let mut endpoint = Endpoint::client(bind_addr)?;
|
||||
endpoint.set_default_client_config(ClientConfig::new(Arc::new(crypto)));
|
||||
|
||||
let connection = endpoint
|
||||
@@ -829,7 +842,7 @@ pub async fn upload_key_package(
|
||||
p.set_identity_key(identity_key);
|
||||
p.set_package(package);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth);
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
@@ -860,7 +873,7 @@ pub async fn fetch_key_package(
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth);
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
@@ -893,7 +906,7 @@ pub async fn enqueue(
|
||||
p.set_channel_id(&[]);
|
||||
p.set_version(1);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth);
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
req.send().promise.await.context("enqueue RPC failed")?;
|
||||
Ok(())
|
||||
@@ -910,9 +923,9 @@ pub async fn fetch_all(
|
||||
p.set_recipient_key(recipient_key);
|
||||
p.set_channel_id(&[]);
|
||||
p.set_version(1);
|
||||
p.set_limit(0); // fetch all (backward compat)
|
||||
p.set_limit(0); // fetch all
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth);
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req.send().promise.await.context("fetch RPC failed")?;
|
||||
@@ -944,9 +957,9 @@ pub async fn fetch_wait(
|
||||
p.set_timeout_ms(timeout_ms);
|
||||
p.set_channel_id(&[]);
|
||||
p.set_version(1);
|
||||
p.set_limit(0); // fetch all (backward compat)
|
||||
p.set_limit(0); // fetch all
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth);
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req.send().promise.await.context("fetch_wait RPC failed")?;
|
||||
@@ -981,7 +994,7 @@ pub async fn upload_hybrid_key(
|
||||
p.set_identity_key(identity_key);
|
||||
p.set_hybrid_public_key(&hybrid_pk.to_bytes());
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth);
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
req.send()
|
||||
.promise
|
||||
@@ -1002,7 +1015,7 @@ pub async fn fetch_hybrid_key(
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth);
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
@@ -1026,15 +1039,13 @@ pub async fn fetch_hybrid_key(
|
||||
Ok(Some(pk))
|
||||
}
|
||||
|
||||
/// Try to decrypt a hybrid envelope. If the payload is not a hybrid envelope or
|
||||
/// decryption fails, return the original bytes unchanged (legacy plaintext MLS).
|
||||
fn try_hybrid_unwrap(hybrid_kp: Option<&HybridKeypair>, payload: &[u8]) -> Vec<u8> {
|
||||
if let Some(kp) = hybrid_kp {
|
||||
if let Ok(inner) = hybrid_decrypt(kp, payload) {
|
||||
return inner;
|
||||
}
|
||||
}
|
||||
payload.to_vec()
|
||||
/// Decrypt a hybrid envelope. Requires a hybrid key; no fallback to plaintext MLS.
|
||||
fn try_hybrid_decrypt(
|
||||
hybrid_kp: Option<&HybridKeypair>,
|
||||
payload: &[u8],
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
let kp = hybrid_kp.ok_or_else(|| anyhow::anyhow!("hybrid key required for decryption"))?;
|
||||
hybrid_decrypt(kp, payload).map_err(|e| anyhow::anyhow!("{e}"))
|
||||
}
|
||||
|
||||
fn sha256(bytes: &[u8]) -> Vec<u8> {
|
||||
@@ -1042,20 +1053,21 @@ fn sha256(bytes: &[u8]) -> Vec<u8> {
|
||||
Sha256::digest(bytes).to_vec()
|
||||
}
|
||||
|
||||
fn set_auth(auth: &mut auth::Builder<'_>) {
|
||||
let ctx = AUTH_CONTEXT
|
||||
.get()
|
||||
.expect("init_auth must be called with a non-empty token before RPCs");
|
||||
fn set_auth(auth: &mut auth::Builder<'_>) -> anyhow::Result<()> {
|
||||
let ctx = AUTH_CONTEXT.get().ok_or_else(|| {
|
||||
anyhow::anyhow!("init_auth must be called with a non-empty token before RPCs")
|
||||
})?;
|
||||
auth.set_version(ctx.version);
|
||||
auth.set_access_token(&ctx.access_token);
|
||||
auth.set_device_id(&ctx.device_id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
struct StoredState {
|
||||
identity_seed: [u8; 32],
|
||||
group: Option<Vec<u8>>,
|
||||
/// Post-quantum hybrid keypair (X25519 + ML-KEM-768). `None` for legacy state files.
|
||||
/// Post-quantum hybrid keypair (X25519 + ML-KEM-768). `None` for state created before hybrid was added; generated on load if missing.
|
||||
#[serde(default)]
|
||||
hybrid_key: Option<HybridKeypairBytes>,
|
||||
/// Cached member public keys for group participants (Fix 14 prep).
|
||||
@@ -1081,10 +1093,7 @@ impl StoredState {
|
||||
Ok((member, hybrid_kp))
|
||||
}
|
||||
|
||||
fn from_parts(
|
||||
member: &GroupMember,
|
||||
hybrid_kp: Option<&HybridKeypair>,
|
||||
) -> anyhow::Result<Self> {
|
||||
fn from_parts(member: &GroupMember, hybrid_kp: Option<&HybridKeypair>) -> anyhow::Result<Self> {
|
||||
let group = member
|
||||
.group_ref()
|
||||
.map(|g| bincode::serialize(g).context("serialize group"))
|
||||
@@ -1166,7 +1175,7 @@ fn is_encrypted_state(bytes: &[u8]) -> bool {
|
||||
fn load_or_init_state(path: &Path, password: Option<&str>) -> anyhow::Result<StoredState> {
|
||||
if path.exists() {
|
||||
let mut state = load_existing_state(path, password)?;
|
||||
// Upgrade legacy state files: generate hybrid keypair if missing.
|
||||
// Generate hybrid keypair if missing (upgrade from older state).
|
||||
if state.hybrid_key.is_none() {
|
||||
state.hybrid_key = Some(HybridKeypair::generate().to_bytes());
|
||||
write_state(path, &state, password)?;
|
||||
@@ -1187,9 +1196,8 @@ fn load_existing_state(path: &Path, password: Option<&str>) -> anyhow::Result<St
|
||||
let bytes = std::fs::read(path).with_context(|| format!("read state file {path:?}"))?;
|
||||
|
||||
if is_encrypted_state(&bytes) {
|
||||
let pw = password.context(
|
||||
"state file is encrypted (QPCE); a password is required to decrypt it",
|
||||
)?;
|
||||
let pw = password
|
||||
.context("state file is encrypted (QPCE); a password is required to decrypt it")?;
|
||||
let plaintext = decrypt_state(pw, &bytes)?;
|
||||
bincode::deserialize(&plaintext).context("decode encrypted state")
|
||||
} else {
|
||||
|
||||
@@ -6,8 +6,7 @@ use clap::{Parser, Subcommand};
|
||||
|
||||
use quicnprotochat_client::{
|
||||
cmd_create_group, cmd_demo_group, cmd_fetch_key, cmd_invite, cmd_join, cmd_login, cmd_ping,
|
||||
cmd_recv, cmd_register, cmd_register_state, cmd_register_user, cmd_send, ClientAuth,
|
||||
init_auth,
|
||||
cmd_recv, cmd_register, cmd_register_state, cmd_register_user, cmd_send, init_auth, ClientAuth,
|
||||
};
|
||||
|
||||
// ── CLI ───────────────────────────────────────────────────────────────────────
|
||||
@@ -35,7 +34,12 @@ struct Args {
|
||||
|
||||
/// Bearer token or OPAQUE session token for authenticated requests.
|
||||
/// Not required for register-user and login commands.
|
||||
#[arg(long, global = true, env = "QUICNPROTOCHAT_ACCESS_TOKEN", default_value = "")]
|
||||
#[arg(
|
||||
long,
|
||||
global = true,
|
||||
env = "QUICNPROTOCHAT_ACCESS_TOKEN",
|
||||
default_value = ""
|
||||
)]
|
||||
access_token: String,
|
||||
|
||||
/// Optional device identifier (UUID bytes encoded as hex or raw string).
|
||||
@@ -327,7 +331,13 @@ async fn main() -> anyhow::Result<()> {
|
||||
Command::Join { state, server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_join(&state, &server, &args.ca_cert, &args.server_name, state_pw))
|
||||
.run_until(cmd_join(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Send {
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// cargo_bin! only works for current package's binary; we spawn quicnprotochat-server from another package.
|
||||
#![allow(deprecated)]
|
||||
|
||||
use std::{path::PathBuf, process::Command, time::Duration};
|
||||
|
||||
use assert_cmd::cargo::cargo_bin;
|
||||
@@ -5,9 +8,14 @@ use portpicker::pick_unused_port;
|
||||
use tempfile::TempDir;
|
||||
use tokio::time::sleep;
|
||||
|
||||
// Required by rustls 0.23 when QUIC/TLS is used from this process (e.g. client in test).
|
||||
fn ensure_rustls_provider() {
|
||||
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||
}
|
||||
|
||||
use quicnprotochat_client::{
|
||||
cmd_create_group, cmd_invite, cmd_join, cmd_ping, cmd_register_state, cmd_send, ClientAuth,
|
||||
connect_node, fetch_wait, init_auth,
|
||||
cmd_create_group, cmd_invite, cmd_join, cmd_ping, cmd_register_state, cmd_send, connect_node,
|
||||
fetch_wait, init_auth, ClientAuth,
|
||||
};
|
||||
use quicnprotochat_core::IdentityKeypair;
|
||||
|
||||
@@ -39,6 +47,8 @@ async fn wait_for_health(server: &str, ca_cert: &PathBuf, server_name: &str) ->
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
|
||||
ensure_rustls_provider();
|
||||
|
||||
let temp = TempDir::new()?;
|
||||
let base = temp.path();
|
||||
let port = pick_unused_port().expect("free port");
|
||||
@@ -51,7 +61,7 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
|
||||
|
||||
// Spawn server binary.
|
||||
let server_bin = cargo_bin("quicnprotochat-server");
|
||||
let mut child = Command::new(server_bin)
|
||||
let child = Command::new(server_bin)
|
||||
.arg("--listen")
|
||||
.arg(&listen)
|
||||
.arg("--data-dir")
|
||||
@@ -108,12 +118,7 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_create_group(
|
||||
&alice_state,
|
||||
&server,
|
||||
"test-group",
|
||||
None,
|
||||
))
|
||||
.run_until(cmd_create_group(&alice_state, &server, "test-group", None))
|
||||
.await?;
|
||||
|
||||
// Load Bob identity key from persisted state to use as peer key.
|
||||
@@ -134,13 +139,7 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_join(
|
||||
&bob_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
))
|
||||
.run_until(cmd_join(&bob_state, &server, &ca_cert, "localhost", None))
|
||||
.await?;
|
||||
|
||||
// Send Alice -> Bob.
|
||||
|
||||
@@ -185,7 +185,7 @@ impl GroupMember {
|
||||
/// group exists, or openmls fails.
|
||||
pub fn add_member(
|
||||
&mut self,
|
||||
key_package_bytes: &[u8],
|
||||
mut key_package_bytes: &[u8],
|
||||
) -> Result<(Vec<u8>, Vec<u8>), CoreError> {
|
||||
let group = self
|
||||
.group
|
||||
@@ -196,7 +196,7 @@ impl GroupMember {
|
||||
// TlsSerialize; KeyPackageIn derives TlsDeserialize and provides validate()
|
||||
// which verifies the signature and returns a trusted KeyPackage.
|
||||
let key_package: KeyPackage =
|
||||
KeyPackageIn::tls_deserialize(&mut key_package_bytes.as_ref())
|
||||
KeyPackageIn::tls_deserialize(&mut key_package_bytes)
|
||||
.map_err(|e| CoreError::Mls(format!("KeyPackage deserialise: {e:?}")))?
|
||||
.validate(self.backend.crypto(), ProtocolVersion::Mls10)
|
||||
.map_err(|e| CoreError::Mls(format!("KeyPackage validate: {e:?}")))?;
|
||||
@@ -234,9 +234,9 @@ impl GroupMember {
|
||||
/// KeyPackage, or openmls validation fails.
|
||||
///
|
||||
/// [`generate_key_package`]: Self::generate_key_package
|
||||
pub fn join_group(&mut self, welcome_bytes: &[u8]) -> Result<(), CoreError> {
|
||||
pub fn join_group(&mut self, mut welcome_bytes: &[u8]) -> Result<(), CoreError> {
|
||||
// Deserialise MlsMessageIn, then extract the inner Welcome.
|
||||
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut welcome_bytes.as_ref())
|
||||
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut welcome_bytes)
|
||||
.map_err(|e| CoreError::Mls(format!("Welcome deserialise: {e:?}")))?;
|
||||
|
||||
// into_welcome() is feature-gated in openmls 0.5; extract() is public.
|
||||
@@ -291,13 +291,13 @@ impl GroupMember {
|
||||
///
|
||||
/// Returns [`CoreError::Mls`] if the message is malformed, fails
|
||||
/// authentication, or the group state is inconsistent.
|
||||
pub fn receive_message(&mut self, bytes: &[u8]) -> Result<Option<Vec<u8>>, CoreError> {
|
||||
pub fn receive_message(&mut self, mut bytes: &[u8]) -> Result<Option<Vec<u8>>, CoreError> {
|
||||
let group = self
|
||||
.group
|
||||
.as_mut()
|
||||
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
|
||||
|
||||
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes.as_ref())
|
||||
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes)
|
||||
.map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;
|
||||
|
||||
// into_protocol_message() is feature-gated; extract() + manual construction is not.
|
||||
|
||||
@@ -236,10 +236,7 @@ pub fn hybrid_encrypt(
|
||||
}
|
||||
|
||||
/// Decrypt a hybrid envelope using the recipient's private key.
|
||||
pub fn hybrid_decrypt(
|
||||
keypair: &HybridKeypair,
|
||||
envelope: &[u8],
|
||||
) -> Result<Vec<u8>, HybridKemError> {
|
||||
pub fn hybrid_decrypt(keypair: &HybridKeypair, envelope: &[u8]) -> Result<Vec<u8>, HybridKemError> {
|
||||
if envelope.len() < HEADER_LEN + 16 {
|
||||
// 16 = minimum AEAD tag
|
||||
return Err(HybridKemError::TooShort(envelope.len()));
|
||||
@@ -274,8 +271,8 @@ pub fn hybrid_decrypt(
|
||||
|
||||
// 2. ML-KEM decapsulation — convert bytes to the ciphertext array type
|
||||
// that `DecapsulationKey::decapsulate` expects.
|
||||
let mlkem_ct_arr = Array::try_from(mlkem_ct_bytes)
|
||||
.map_err(|_| HybridKemError::MlKemDecapsFailed)?;
|
||||
let mlkem_ct_arr =
|
||||
Array::try_from(mlkem_ct_bytes).map_err(|_| HybridKemError::MlKemDecapsFailed)?;
|
||||
let mlkem_ss = keypair
|
||||
.mlkem_dk
|
||||
.decapsulate(&mlkem_ct_arr)
|
||||
@@ -419,10 +416,7 @@ mod tests {
|
||||
let restored = HybridKeypair::from_bytes(&bytes).unwrap();
|
||||
|
||||
assert_eq!(kp.x25519_pk.to_bytes(), restored.x25519_pk.to_bytes());
|
||||
assert_eq!(
|
||||
kp.public_key().mlkem_ek,
|
||||
restored.public_key().mlkem_ek
|
||||
);
|
||||
assert_eq!(kp.public_key().mlkem_ek, restored.public_key().mlkem_ek);
|
||||
|
||||
// Verify restored keypair can decrypt
|
||||
let pk = kp.public_key();
|
||||
|
||||
@@ -18,15 +18,44 @@
|
||||
|
||||
use openmls::prelude::{
|
||||
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage,
|
||||
TlsSerializeTrait,
|
||||
KeyPackageIn, TlsDeserializeTrait, TlsSerializeTrait,
|
||||
};
|
||||
use openmls_rust_crypto::OpenMlsRustCrypto;
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
use crate::{error::CoreError, identity::IdentityKeypair};
|
||||
|
||||
/// The MLS ciphersuite used throughout quicnprotochat.
|
||||
const CIPHERSUITE: Ciphersuite = Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
|
||||
/// The MLS ciphersuite used throughout quicnprotochat (RFC 9420 §17.1).
|
||||
pub const ALLOWED_CIPHERSUITE: Ciphersuite =
|
||||
Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
|
||||
|
||||
/// Wire value of the allowed ciphersuite (KeyPackage TLS encoding: version 2B, ciphersuite 2B).
|
||||
const ALLOWED_CIPHERSUITE_WIRE: u16 = 0x0001;
|
||||
|
||||
const CIPHERSUITE: Ciphersuite = ALLOWED_CIPHERSUITE;
|
||||
|
||||
/// Validates that the KeyPackage bytes use an allowed ciphersuite (Phase 2: ciphersuite allowlist).
|
||||
///
|
||||
/// Parses the TLS-encoded KeyPackage and rejects if the ciphersuite is not
|
||||
/// `MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519`. Does not verify signatures;
|
||||
/// the server uses this only to enforce policy before storing.
|
||||
pub fn validate_keypackage_ciphersuite(bytes: &[u8]) -> Result<(), CoreError> {
|
||||
if bytes.len() < 4 {
|
||||
return Err(CoreError::Mls("KeyPackage too short for version+ciphersuite".into()));
|
||||
}
|
||||
let cs_wire = u16::from_be_bytes([bytes[2], bytes[3]]);
|
||||
if cs_wire != ALLOWED_CIPHERSUITE_WIRE {
|
||||
return Err(CoreError::Mls(format!(
|
||||
"KeyPackage ciphersuite {:#06x} not in allowlist (only {:#06x} allowed)",
|
||||
cs_wire, ALLOWED_CIPHERSUITE_WIRE
|
||||
)));
|
||||
}
|
||||
// Optionally confirm full parse so we don't accept garbage that happens to have 0x0001 at offset 2.
|
||||
let mut cursor = bytes;
|
||||
let _kp = KeyPackageIn::tls_deserialize(&mut cursor)
|
||||
.map_err(|e| CoreError::Mls(format!("KeyPackage parse: {e:?}")))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Generate a fresh MLS KeyPackage for `identity` and serialise it.
|
||||
///
|
||||
|
||||
@@ -25,9 +25,9 @@ pub mod opaque_auth;
|
||||
pub use error::CoreError;
|
||||
pub use group::GroupMember;
|
||||
pub use hybrid_kem::{
|
||||
hybrid_decrypt, hybrid_encrypt, HybridKeypair, HybridKeypairBytes, HybridKemError,
|
||||
hybrid_decrypt, hybrid_encrypt, HybridKemError, HybridKeypair, HybridKeypairBytes,
|
||||
HybridPublicKey,
|
||||
};
|
||||
pub use identity::IdentityKeypair;
|
||||
pub use keypackage::generate_key_package;
|
||||
pub use keypackage::{generate_key_package, validate_keypackage_ciphersuite};
|
||||
pub use keystore::DiskKeyStore;
|
||||
|
||||
@@ -14,9 +14,7 @@ pub struct OpaqueSuite;
|
||||
|
||||
impl CipherSuite for OpaqueSuite {
|
||||
type OprfCs = opaque_ke::Ristretto255;
|
||||
type KeyExchange = opaque_ke::key_exchange::tripledh::TripleDh<
|
||||
opaque_ke::Ristretto255,
|
||||
sha2::Sha512,
|
||||
>;
|
||||
type KeyExchange =
|
||||
opaque_ke::key_exchange::tripledh::TripleDh<opaque_ke::Ristretto255, sha2::Sha512>;
|
||||
type Ksf = argon2::Argon2<'static>;
|
||||
}
|
||||
|
||||
12
crates/quicnprotochat-p2p/Cargo.toml
Normal file
12
crates/quicnprotochat-p2p/Cargo.toml
Normal file
@@ -0,0 +1,12 @@
|
||||
[package]
|
||||
name = "quicnprotochat-p2p"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "P2P transport layer for quicnprotochat using iroh."
|
||||
license = "MIT"
|
||||
|
||||
[dependencies]
|
||||
iroh = "0.96"
|
||||
tokio = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
anyhow = { workspace = true }
|
||||
189
crates/quicnprotochat-p2p/src/lib.rs
Normal file
189
crates/quicnprotochat-p2p/src/lib.rs
Normal file
@@ -0,0 +1,189 @@
|
||||
//! P2P transport layer for quicnprotochat using iroh.
|
||||
//!
|
||||
//! Provides direct peer-to-peer QUIC connections with NAT traversal via iroh
|
||||
//! relay servers. When both peers are online, messages bypass the central
|
||||
//! server entirely.
|
||||
//!
|
||||
//! # Architecture
|
||||
//!
|
||||
//! ```text
|
||||
//! Client A ── iroh direct (QUIC) ── Client B (preferred: low latency)
|
||||
//! │ │
|
||||
//! └── QUIC/TLS ── Server ── QUIC/TLS ┘ (fallback: store-and-forward)
|
||||
//! ```
|
||||
|
||||
use iroh::{Endpoint, EndpointAddr, PublicKey, SecretKey};
|
||||
|
||||
/// ALPN protocol identifier for quicnprotochat P2P messaging.
|
||||
const P2P_ALPN: &[u8] = b"quicnprotochat/p2p/1";
|
||||
|
||||
/// A P2P node backed by an iroh endpoint.
|
||||
///
|
||||
/// Manages direct QUIC connections to peers with automatic NAT traversal.
|
||||
pub struct P2pNode {
|
||||
endpoint: Endpoint,
|
||||
}
|
||||
|
||||
/// Received P2P message with sender information.
|
||||
pub struct P2pMessage {
|
||||
pub sender: PublicKey,
|
||||
pub payload: Vec<u8>,
|
||||
}
|
||||
|
||||
impl P2pNode {
|
||||
/// Start a new P2P node.
|
||||
///
|
||||
/// Generates a fresh identity or reuses a provided secret key.
|
||||
pub async fn start(secret_key: Option<SecretKey>) -> anyhow::Result<Self> {
|
||||
let mut builder = Endpoint::builder();
|
||||
if let Some(sk) = secret_key {
|
||||
builder = builder.secret_key(sk);
|
||||
}
|
||||
builder = builder.alpns(vec![P2P_ALPN.to_vec()]);
|
||||
|
||||
let endpoint = builder.bind().await?;
|
||||
|
||||
tracing::info!(
|
||||
node_id = %endpoint.id().fmt_short(),
|
||||
"P2P node started"
|
||||
);
|
||||
|
||||
Ok(Self { endpoint })
|
||||
}
|
||||
|
||||
/// This node's public key (used as node ID for peer discovery).
|
||||
pub fn node_id(&self) -> PublicKey {
|
||||
self.endpoint.id()
|
||||
}
|
||||
|
||||
/// This node's secret key (for persistence across restarts).
|
||||
pub fn secret_key(&self) -> SecretKey {
|
||||
self.endpoint.secret_key().clone()
|
||||
}
|
||||
|
||||
/// Get the node's network address information for publishing to discovery.
|
||||
pub fn endpoint_addr(&self) -> EndpointAddr {
|
||||
self.endpoint.addr()
|
||||
}
|
||||
|
||||
/// Send a payload directly to a peer via P2P QUIC.
|
||||
pub async fn send(&self, peer: impl Into<EndpointAddr>, payload: &[u8]) -> anyhow::Result<()> {
|
||||
let peer = peer.into();
|
||||
let conn = self.endpoint.connect(peer, P2P_ALPN).await?;
|
||||
|
||||
let mut send = conn.open_uni().await.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
|
||||
// Simple framing: 4-byte length prefix + payload.
|
||||
let len = (payload.len() as u32).to_be_bytes();
|
||||
send.write_all(&len)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
send.write_all(payload)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
send.finish().map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
// Wait until the peer has consumed the stream before dropping.
|
||||
send.stopped().await.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
|
||||
tracing::debug!(
|
||||
peer = %conn.remote_id().fmt_short(),
|
||||
bytes = payload.len(),
|
||||
"P2P message sent"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Accept a single incoming P2P message.
|
||||
///
|
||||
/// Blocks until a peer connects and sends data.
|
||||
pub async fn recv(&self) -> anyhow::Result<P2pMessage> {
|
||||
let incoming = self
|
||||
.endpoint
|
||||
.accept()
|
||||
.await
|
||||
.ok_or_else(|| anyhow::anyhow!("no more incoming connections"))?;
|
||||
|
||||
let conn = incoming.await.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
let sender = conn.remote_id();
|
||||
|
||||
let mut recv = conn
|
||||
.accept_uni()
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
|
||||
// Read length-prefixed payload.
|
||||
let mut len_buf = [0u8; 4];
|
||||
recv.read_exact(&mut len_buf)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
let len = u32::from_be_bytes(len_buf) as usize;
|
||||
|
||||
if len > 5 * 1024 * 1024 {
|
||||
anyhow::bail!("P2P payload too large: {len} bytes");
|
||||
}
|
||||
|
||||
let mut payload = vec![0u8; len];
|
||||
recv.read_exact(&mut payload)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e}"))?;
|
||||
|
||||
tracing::debug!(
|
||||
peer = %sender.fmt_short(),
|
||||
bytes = len,
|
||||
"P2P message received"
|
||||
);
|
||||
|
||||
Ok(P2pMessage { sender, payload })
|
||||
}
|
||||
|
||||
/// Gracefully shut down the P2P node.
|
||||
pub async fn close(self) {
|
||||
self.endpoint.close().await;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use std::time::Duration;

    use iroh::RelayMode;

    use super::*;

    /// Build a relay-free node bound to local interfaces only, so the
    /// round-trip test never depends on external infrastructure.
    async fn local_node() -> P2pNode {
        let builder = Endpoint::builder()
            .alpns(vec![P2P_ALPN.to_vec()])
            .relay_mode(RelayMode::Disabled);
        let endpoint = builder.bind().await.unwrap();
        P2pNode { endpoint }
    }

    #[tokio::test]
    async fn p2p_round_trip() {
        let sender = local_node().await;
        let receiver = local_node().await;

        let receiver_addr = receiver.endpoint_addr();
        let sender_id = sender.node_id();
        let payload = b"hello from alice via P2P";

        // Receiver task: accept one message, then check both the payload
        // bytes and the authenticated peer identity.
        let recv_task = tokio::spawn(async move {
            let msg = receiver.recv().await.unwrap();
            assert_eq!(msg.payload, payload.to_vec());
            assert_eq!(msg.sender, sender_id);
        });

        // Let the receiver reach its accept() call before dialling.
        tokio::time::sleep(Duration::from_millis(200)).await;

        sender.send(receiver_addr, payload).await.unwrap();

        // The receiver must finish reading before anything is torn down.
        recv_task.await.unwrap();

        tokio::time::sleep(Duration::from_millis(100)).await;
        sender.close().await;
    }
}
|
||||
@@ -1,5 +1,8 @@
|
||||
//! Cap'n Proto schemas, generated types, and serialisation helpers for quicnprotochat.
|
||||
//!
|
||||
//! Generated Cap'n Proto code emits unnecessary parentheses; allow per coding standards.
|
||||
#![allow(unused_parens)]
|
||||
|
||||
//! # Design constraints
|
||||
//!
|
||||
//! This crate is intentionally restricted:
|
||||
|
||||
@@ -23,6 +23,7 @@ pub const E017_SESSION_EXPIRED: &str = "E017";
|
||||
pub const E018_USER_EXISTS: &str = "E018";
|
||||
pub const E019_NO_PENDING_LOGIN: &str = "E019";
|
||||
pub const E020_BAD_PARAMS: &str = "E020";
|
||||
pub const E021_CIPHERSUITE_NOT_ALLOWED: &str = "E021";
|
||||
|
||||
/// Build a `capnp::Error::failed()` with the structured code prefix.
|
||||
pub fn coded_error(code: &str, msg: impl std::fmt::Display) -> capnp::Error {
|
||||
|
||||
@@ -13,10 +13,15 @@
|
||||
//! The entire RPC stack lives on a `tokio::task::LocalSet` spawned per
|
||||
//! connection.
|
||||
|
||||
use std::{fs, net::SocketAddr, path::{Path, PathBuf}, sync::Arc, time::Duration};
|
||||
use std::{
|
||||
fs,
|
||||
net::SocketAddr,
|
||||
path::{Path, PathBuf},
|
||||
sync::Arc,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use serde::Deserialize;
|
||||
use capnp::capability::Promise;
|
||||
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
|
||||
use clap::Parser;
|
||||
@@ -33,6 +38,7 @@ use rand::rngs::OsRng;
|
||||
use rcgen::generate_simple_self_signed;
|
||||
use rustls::pki_types::{CertificateDer, PrivateKeyDer};
|
||||
use rustls::version::TLS13;
|
||||
use serde::Deserialize;
|
||||
use sha2::{Digest, Sha256};
|
||||
use subtle::ConstantTimeEq;
|
||||
use tokio::sync::Notify;
|
||||
@@ -44,11 +50,11 @@ mod sql_store;
|
||||
mod storage;
|
||||
use error_codes::*;
|
||||
use sql_store::SqlStore;
|
||||
use storage::{FileBackedStore, Store, StorageError};
|
||||
use storage::{FileBackedStore, StorageError, Store};
|
||||
|
||||
const MAX_PAYLOAD_BYTES: usize = 5 * 1024 * 1024; // 5 MB cap per message
|
||||
const MAX_KEYPACKAGE_BYTES: usize = 1 * 1024 * 1024; // 1 MB cap per KeyPackage
|
||||
const CURRENT_WIRE_VERSION: u16 = 1; // legacy disabled; current wire version only
|
||||
const CURRENT_WIRE_VERSION: u16 = 1;
|
||||
|
||||
const DEFAULT_LISTEN: &str = "0.0.0.0:7000";
|
||||
const DEFAULT_DATA_DIR: &str = "data";
|
||||
@@ -71,7 +77,9 @@ struct AuthConfig {
|
||||
|
||||
impl AuthConfig {
|
||||
fn new(required_token: Option<String>) -> Self {
|
||||
let required_token = required_token.filter(|s| !s.is_empty()).map(|s| s.into_bytes());
|
||||
let required_token = required_token
|
||||
.filter(|s| !s.is_empty())
|
||||
.map(|s| s.into_bytes());
|
||||
Self { required_token }
|
||||
}
|
||||
}
|
||||
@@ -110,34 +118,42 @@ fn load_config(path: Option<&Path>) -> anyhow::Result<FileConfig> {
|
||||
return Ok(FileConfig::default());
|
||||
}
|
||||
|
||||
let contents = fs::read_to_string(&path)
|
||||
.with_context(|| format!("read config file {path:?}"))?;
|
||||
let cfg: FileConfig = toml::from_str(&contents)
|
||||
.with_context(|| format!("parse config file {path:?}"))?;
|
||||
let contents =
|
||||
fs::read_to_string(&path).with_context(|| format!("read config file {path:?}"))?;
|
||||
let cfg: FileConfig =
|
||||
toml::from_str(&contents).with_context(|| format!("parse config file {path:?}"))?;
|
||||
Ok(cfg)
|
||||
}
|
||||
|
||||
fn merge_config(args: &Args, file: &FileConfig) -> EffectiveConfig {
|
||||
let listen = if args.listen == DEFAULT_LISTEN {
|
||||
file.listen.clone().unwrap_or_else(|| DEFAULT_LISTEN.to_string())
|
||||
file.listen
|
||||
.clone()
|
||||
.unwrap_or_else(|| DEFAULT_LISTEN.to_string())
|
||||
} else {
|
||||
args.listen.clone()
|
||||
};
|
||||
|
||||
let data_dir = if args.data_dir == DEFAULT_DATA_DIR {
|
||||
file.data_dir.clone().unwrap_or_else(|| DEFAULT_DATA_DIR.to_string())
|
||||
file.data_dir
|
||||
.clone()
|
||||
.unwrap_or_else(|| DEFAULT_DATA_DIR.to_string())
|
||||
} else {
|
||||
args.data_dir.clone()
|
||||
};
|
||||
|
||||
let tls_cert = if args.tls_cert == PathBuf::from(DEFAULT_TLS_CERT) {
|
||||
file.tls_cert.clone().unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_CERT))
|
||||
file.tls_cert
|
||||
.clone()
|
||||
.unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_CERT))
|
||||
} else {
|
||||
args.tls_cert.clone()
|
||||
};
|
||||
|
||||
let tls_key = if args.tls_key == PathBuf::from(DEFAULT_TLS_KEY) {
|
||||
file.tls_key.clone().unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_KEY))
|
||||
file.tls_key
|
||||
.clone()
|
||||
.unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_KEY))
|
||||
} else {
|
||||
args.tls_key.clone()
|
||||
};
|
||||
@@ -231,7 +247,11 @@ struct Args {
|
||||
// ── Session management ──────────────────────────────────────────────────────
|
||||
|
||||
struct SessionInfo {
|
||||
/// For future audit logging.
|
||||
#[allow(dead_code)]
|
||||
username: String,
|
||||
/// For future audit logging.
|
||||
#[allow(dead_code)]
|
||||
identity_key: Vec<u8>,
|
||||
#[allow(dead_code)]
|
||||
created_at: u64,
|
||||
@@ -289,9 +309,12 @@ impl node_service::Server for NodeServiceImpl {
|
||||
params: node_service::UploadKeyPackageParams,
|
||||
mut results: node_service::UploadKeyPackageResults,
|
||||
) -> Promise<(), capnp::Error> {
|
||||
let params = params
|
||||
.get()
|
||||
.map_err(|e| coded_error(E020_BAD_PARAMS, format!("upload_key_package: bad params: {e}")));
|
||||
let params = params.get().map_err(|e| {
|
||||
coded_error(
|
||||
E020_BAD_PARAMS,
|
||||
format!("upload_key_package: bad params: {e}"),
|
||||
)
|
||||
});
|
||||
|
||||
let (identity_key, package) = match params {
|
||||
Ok(p) => {
|
||||
@@ -314,7 +337,10 @@ impl node_service::Server for NodeServiceImpl {
|
||||
if identity_key.len() != 32 {
|
||||
return Promise::err(coded_error(
|
||||
E004_IDENTITY_KEY_LENGTH,
|
||||
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
|
||||
format!(
|
||||
"identityKey must be exactly 32 bytes, got {}",
|
||||
identity_key.len()
|
||||
),
|
||||
));
|
||||
}
|
||||
if package.is_empty() {
|
||||
@@ -327,6 +353,14 @@ impl node_service::Server for NodeServiceImpl {
|
||||
));
|
||||
}
|
||||
|
||||
// Phase 2: ciphersuite allowlist — reject KeyPackages not using the allowed MLS ciphersuite.
|
||||
if let Err(e) = quicnprotochat_core::validate_keypackage_ciphersuite(&package) {
|
||||
return Promise::err(coded_error(
|
||||
E021_CIPHERSUITE_NOT_ALLOWED,
|
||||
format!("KeyPackage ciphersuite not allowed: {e}"),
|
||||
));
|
||||
}
|
||||
|
||||
let fingerprint: Vec<u8> = Sha256::digest(&package).to_vec();
|
||||
if let Err(e) = self
|
||||
.store
|
||||
@@ -371,7 +405,10 @@ impl node_service::Server for NodeServiceImpl {
|
||||
if identity_key.len() != 32 {
|
||||
return Promise::err(coded_error(
|
||||
E004_IDENTITY_KEY_LENGTH,
|
||||
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
|
||||
format!(
|
||||
"identityKey must be exactly 32 bytes, got {}",
|
||||
identity_key.len()
|
||||
),
|
||||
));
|
||||
}
|
||||
|
||||
@@ -424,15 +461,19 @@ impl node_service::Server for NodeServiceImpl {
|
||||
};
|
||||
let channel_id = p.get_channel_id().unwrap_or_default().to_vec();
|
||||
let version = p.get_version();
|
||||
let auth_token = match validate_auth_return_token(&self.auth_cfg, &self.sessions, p.get_auth()) {
|
||||
Ok(t) => t,
|
||||
Err(e) => return Promise::err(e),
|
||||
};
|
||||
let auth_token =
|
||||
match validate_auth_return_token(&self.auth_cfg, &self.sessions, p.get_auth()) {
|
||||
Ok(t) => t,
|
||||
Err(e) => return Promise::err(e),
|
||||
};
|
||||
|
||||
if recipient_key.len() != 32 {
|
||||
return Promise::err(coded_error(
|
||||
E004_IDENTITY_KEY_LENGTH,
|
||||
format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
|
||||
format!(
|
||||
"recipientKey must be exactly 32 bytes, got {}",
|
||||
recipient_key.len()
|
||||
),
|
||||
));
|
||||
}
|
||||
if payload.is_empty() {
|
||||
@@ -447,7 +488,10 @@ impl node_service::Server for NodeServiceImpl {
|
||||
if version != CURRENT_WIRE_VERSION {
|
||||
return Promise::err(coded_error(
|
||||
E012_WIRE_VERSION,
|
||||
format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
|
||||
format!(
|
||||
"unsupported wire version {} (expected {CURRENT_WIRE_VERSION})",
|
||||
version
|
||||
),
|
||||
));
|
||||
}
|
||||
|
||||
@@ -510,11 +554,7 @@ impl node_service::Server for NodeServiceImpl {
|
||||
.ok()
|
||||
.map(|p| p.get_version())
|
||||
.unwrap_or(CURRENT_WIRE_VERSION);
|
||||
let limit = params
|
||||
.get()
|
||||
.ok()
|
||||
.map(|p| p.get_limit())
|
||||
.unwrap_or(0);
|
||||
let limit = params.get().ok().map(|p| p.get_limit()).unwrap_or(0);
|
||||
if let Err(e) = params
|
||||
.get()
|
||||
.ok()
|
||||
@@ -527,23 +567,37 @@ impl node_service::Server for NodeServiceImpl {
|
||||
if recipient_key.len() != 32 {
|
||||
return Promise::err(coded_error(
|
||||
E004_IDENTITY_KEY_LENGTH,
|
||||
format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
|
||||
format!(
|
||||
"recipientKey must be exactly 32 bytes, got {}",
|
||||
recipient_key.len()
|
||||
),
|
||||
));
|
||||
}
|
||||
if version != CURRENT_WIRE_VERSION {
|
||||
return Promise::err(coded_error(
|
||||
E012_WIRE_VERSION,
|
||||
format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
|
||||
format!(
|
||||
"unsupported wire version {} (expected {CURRENT_WIRE_VERSION})",
|
||||
version
|
||||
),
|
||||
));
|
||||
}
|
||||
|
||||
let messages = if limit > 0 {
|
||||
match self.store.fetch_limited(&recipient_key, &channel_id, limit as usize).map_err(storage_err) {
|
||||
match self
|
||||
.store
|
||||
.fetch_limited(&recipient_key, &channel_id, limit as usize)
|
||||
.map_err(storage_err)
|
||||
{
|
||||
Ok(m) => m,
|
||||
Err(e) => return Promise::err(e),
|
||||
}
|
||||
} else {
|
||||
match self.store.fetch(&recipient_key, &channel_id).map_err(storage_err) {
|
||||
match self
|
||||
.store
|
||||
.fetch(&recipient_key, &channel_id)
|
||||
.map_err(storage_err)
|
||||
{
|
||||
Ok(m) => m,
|
||||
Err(e) => return Promise::err(e),
|
||||
}
|
||||
@@ -588,13 +642,19 @@ impl node_service::Server for NodeServiceImpl {
|
||||
if recipient_key.len() != 32 {
|
||||
return Promise::err(coded_error(
|
||||
E004_IDENTITY_KEY_LENGTH,
|
||||
format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
|
||||
format!(
|
||||
"recipientKey must be exactly 32 bytes, got {}",
|
||||
recipient_key.len()
|
||||
),
|
||||
));
|
||||
}
|
||||
if version != CURRENT_WIRE_VERSION {
|
||||
return Promise::err(coded_error(
|
||||
E012_WIRE_VERSION,
|
||||
format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
|
||||
format!(
|
||||
"unsupported wire version {} (expected {CURRENT_WIRE_VERSION})",
|
||||
version
|
||||
),
|
||||
));
|
||||
}
|
||||
|
||||
@@ -602,7 +662,11 @@ impl node_service::Server for NodeServiceImpl {
|
||||
let waiters = self.waiters.clone();
|
||||
|
||||
Promise::from_future(async move {
|
||||
let fetch_fn = |s: &Arc<dyn Store>, rk: &[u8], ch: &[u8], lim: u32| -> Result<Vec<Vec<u8>>, capnp::Error> {
|
||||
let fetch_fn = |s: &Arc<dyn Store>,
|
||||
rk: &[u8],
|
||||
ch: &[u8],
|
||||
lim: u32|
|
||||
-> Result<Vec<Vec<u8>>, capnp::Error> {
|
||||
if lim > 0 {
|
||||
s.fetch_limited(rk, ch, lim as usize).map_err(storage_err)
|
||||
} else {
|
||||
@@ -664,7 +728,10 @@ impl node_service::Server for NodeServiceImpl {
|
||||
if identity_key.len() != 32 {
|
||||
return Promise::err(coded_error(
|
||||
E004_IDENTITY_KEY_LENGTH,
|
||||
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
|
||||
format!(
|
||||
"identityKey must be exactly 32 bytes, got {}",
|
||||
identity_key.len()
|
||||
),
|
||||
));
|
||||
}
|
||||
if hybrid_pk.is_empty() {
|
||||
@@ -713,7 +780,10 @@ impl node_service::Server for NodeServiceImpl {
|
||||
if identity_key.len() != 32 {
|
||||
return Promise::err(coded_error(
|
||||
E004_IDENTITY_KEY_LENGTH,
|
||||
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
|
||||
format!(
|
||||
"identityKey must be exactly 32 bytes, got {}",
|
||||
identity_key.len()
|
||||
),
|
||||
));
|
||||
}
|
||||
|
||||
@@ -767,7 +837,10 @@ impl node_service::Server for NodeServiceImpl {
|
||||
};
|
||||
|
||||
if username.is_empty() {
|
||||
return Promise::err(coded_error(E011_USERNAME_EMPTY, "username must not be empty"));
|
||||
return Promise::err(coded_error(
|
||||
E011_USERNAME_EMPTY,
|
||||
"username must not be empty",
|
||||
));
|
||||
}
|
||||
|
||||
let reg_request = match RegistrationRequest::<OpaqueSuite>::deserialize(&request_bytes) {
|
||||
@@ -821,7 +894,10 @@ impl node_service::Server for NodeServiceImpl {
|
||||
let identity_key = p.get_identity_key().unwrap_or_default().to_vec();
|
||||
|
||||
if username.is_empty() {
|
||||
return Promise::err(coded_error(E011_USERNAME_EMPTY, "username must not be empty"));
|
||||
return Promise::err(coded_error(
|
||||
E011_USERNAME_EMPTY,
|
||||
"username must not be empty",
|
||||
));
|
||||
}
|
||||
|
||||
// Fix 5: Registration collision check
|
||||
@@ -894,19 +970,22 @@ impl node_service::Server for NodeServiceImpl {
|
||||
};
|
||||
|
||||
if username.is_empty() {
|
||||
return Promise::err(coded_error(E011_USERNAME_EMPTY, "username must not be empty"));
|
||||
return Promise::err(coded_error(
|
||||
E011_USERNAME_EMPTY,
|
||||
"username must not be empty",
|
||||
));
|
||||
}
|
||||
|
||||
let credential_request =
|
||||
match CredentialRequest::<OpaqueSuite>::deserialize(&request_bytes) {
|
||||
Ok(r) => r,
|
||||
Err(e) => {
|
||||
return Promise::err(coded_error(
|
||||
E010_OPAQUE_ERROR,
|
||||
format!("invalid credential request: {e}"),
|
||||
))
|
||||
}
|
||||
};
|
||||
let credential_request = match CredentialRequest::<OpaqueSuite>::deserialize(&request_bytes)
|
||||
{
|
||||
Ok(r) => r,
|
||||
Err(e) => {
|
||||
return Promise::err(coded_error(
|
||||
E010_OPAQUE_ERROR,
|
||||
format!("invalid credential request: {e}"),
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
// Load user's OPAQUE password file (if registered).
|
||||
let password_file = match self.store.get_user_record(&username) {
|
||||
@@ -978,7 +1057,10 @@ impl node_service::Server for NodeServiceImpl {
|
||||
let identity_key = p.get_identity_key().unwrap_or_default().to_vec();
|
||||
|
||||
if username.is_empty() {
|
||||
return Promise::err(coded_error(E011_USERNAME_EMPTY, "username must not be empty"));
|
||||
return Promise::err(coded_error(
|
||||
E011_USERNAME_EMPTY,
|
||||
"username must not be empty",
|
||||
));
|
||||
}
|
||||
|
||||
// Retrieve the pending ServerLogin state.
|
||||
@@ -1081,11 +1163,18 @@ impl node_service::Server for NodeServiceImpl {
|
||||
if identity_key.len() != 32 {
|
||||
return Promise::err(coded_error(
|
||||
E004_IDENTITY_KEY_LENGTH,
|
||||
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
|
||||
format!(
|
||||
"identityKey must be exactly 32 bytes, got {}",
|
||||
identity_key.len()
|
||||
),
|
||||
));
|
||||
}
|
||||
|
||||
if let Err(e) = self.store.publish_endpoint(&identity_key, node_addr).map_err(storage_err) {
|
||||
if let Err(e) = self
|
||||
.store
|
||||
.publish_endpoint(&identity_key, node_addr)
|
||||
.map_err(storage_err)
|
||||
{
|
||||
return Promise::err(e);
|
||||
}
|
||||
|
||||
@@ -1113,11 +1202,18 @@ impl node_service::Server for NodeServiceImpl {
|
||||
if identity_key.len() != 32 {
|
||||
return Promise::err(coded_error(
|
||||
E004_IDENTITY_KEY_LENGTH,
|
||||
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
|
||||
format!(
|
||||
"identityKey must be exactly 32 bytes, got {}",
|
||||
identity_key.len()
|
||||
),
|
||||
));
|
||||
}
|
||||
|
||||
match self.store.resolve_endpoint(&identity_key).map_err(storage_err) {
|
||||
match self
|
||||
.store
|
||||
.resolve_endpoint(&identity_key)
|
||||
.map_err(storage_err)
|
||||
{
|
||||
Ok(Some(addr)) => {
|
||||
results.get().set_node_addr(&addr);
|
||||
}
|
||||
@@ -1148,9 +1244,10 @@ fn check_rate_limit(
|
||||
token: &[u8],
|
||||
) -> Result<(), capnp::Error> {
|
||||
let now = current_timestamp();
|
||||
let mut entry = rate_limits
|
||||
.entry(token.to_vec())
|
||||
.or_insert(RateEntry { count: 0, window_start: now });
|
||||
let mut entry = rate_limits.entry(token.to_vec()).or_insert(RateEntry {
|
||||
count: 0,
|
||||
window_start: now,
|
||||
});
|
||||
|
||||
if now - entry.window_start >= RATE_LIMIT_WINDOW_SECS {
|
||||
entry.count = 1;
|
||||
@@ -1222,17 +1319,14 @@ fn validate_auth_return_token(
|
||||
// Expired — will be cleaned up by background task.
|
||||
drop(session);
|
||||
sessions.remove(&token);
|
||||
return Err(coded_error(E017_SESSION_EXPIRED, "session token has expired"));
|
||||
return Err(coded_error(
|
||||
E017_SESSION_EXPIRED,
|
||||
"session token has expired",
|
||||
));
|
||||
}
|
||||
|
||||
// If a static token is configured but neither matched, reject.
|
||||
if cfg.required_token.is_some() {
|
||||
return Err(coded_error(E003_INVALID_TOKEN, "invalid accessToken"));
|
||||
}
|
||||
|
||||
// No static token configured and no session match — accept any non-empty
|
||||
// token for backward compatibility (dev mode).
|
||||
Ok(token)
|
||||
// Require either static token or valid session; no legacy accept-any-token.
|
||||
Err(coded_error(E003_INVALID_TOKEN, "invalid accessToken"))
|
||||
}
|
||||
|
||||
// ── Entry point ───────────────────────────────────────────────────────────────
|
||||
@@ -1250,12 +1344,19 @@ async fn main() -> anyhow::Result<()> {
|
||||
let file_cfg = load_config(args.config.as_deref())?;
|
||||
let effective = merge_config(&args, &file_cfg);
|
||||
|
||||
let production = std::env::var("QUICNPROTOCHAT_PRODUCTION")
|
||||
.map(|v| matches!(v.to_lowercase().as_str(), "1" | "true" | "yes"))
|
||||
.unwrap_or(false);
|
||||
if production {
|
||||
validate_production_config(&effective)?;
|
||||
}
|
||||
|
||||
let listen: SocketAddr = effective
|
||||
.listen
|
||||
.parse()
|
||||
.context("--listen must be host:port")?;
|
||||
|
||||
let server_config = build_server_config(&effective.tls_cert, &effective.tls_key)
|
||||
let server_config = build_server_config(&effective.tls_cert, &effective.tls_key, production)
|
||||
.context("failed to build TLS/QUIC server config")?;
|
||||
|
||||
// Shared storage — persisted to disk for restart safety.
|
||||
@@ -1322,11 +1423,14 @@ async fn main() -> anyhow::Result<()> {
|
||||
pending_logins.retain(|_, pl| now - pl.created_at < PENDING_LOGIN_TTL_SECS);
|
||||
|
||||
// Expire stale rate limit entries (Fix 6)
|
||||
rate_limits.retain(|_, entry| now - entry.window_start < RATE_LIMIT_WINDOW_SECS * 2);
|
||||
rate_limits
|
||||
.retain(|_, entry| now - entry.window_start < RATE_LIMIT_WINDOW_SECS * 2);
|
||||
|
||||
// GC expired messages (Fix 7)
|
||||
match store.gc_expired_messages(MESSAGE_TTL_SECS) {
|
||||
Ok(n) if n > 0 => tracing::debug!(expired = n, "garbage collected expired messages"),
|
||||
Ok(n) if n > 0 => {
|
||||
tracing::debug!(expired = n, "garbage collected expired messages")
|
||||
}
|
||||
Err(e) => tracing::warn!(error = %e, "message GC failed"),
|
||||
_ => {}
|
||||
}
|
||||
@@ -1347,42 +1451,54 @@ async fn main() -> anyhow::Result<()> {
|
||||
local
|
||||
.run_until(async move {
|
||||
loop {
|
||||
let incoming = match endpoint.accept().await {
|
||||
Some(i) => i,
|
||||
None => break,
|
||||
};
|
||||
tokio::select! {
|
||||
biased;
|
||||
|
||||
let connecting = match incoming.accept() {
|
||||
Ok(c) => c,
|
||||
Err(e) => {
|
||||
tracing::warn!(error = %e, "failed to accept incoming connection");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
incoming = endpoint.accept() => {
|
||||
let incoming = match incoming {
|
||||
Some(i) => i,
|
||||
None => break,
|
||||
};
|
||||
|
||||
let store = Arc::clone(&store);
|
||||
let waiters = Arc::clone(&waiters);
|
||||
let auth_cfg = Arc::clone(&auth_cfg);
|
||||
let opaque_setup = Arc::clone(&opaque_setup);
|
||||
let pending_logins = Arc::clone(&pending_logins);
|
||||
let sessions = Arc::clone(&sessions);
|
||||
let rate_limits = Arc::clone(&rate_limits);
|
||||
tokio::task::spawn_local(async move {
|
||||
if let Err(e) = handle_node_connection(
|
||||
connecting,
|
||||
store,
|
||||
waiters,
|
||||
auth_cfg,
|
||||
opaque_setup,
|
||||
pending_logins,
|
||||
sessions,
|
||||
rate_limits,
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "connection error");
|
||||
let connecting = match incoming.accept() {
|
||||
Ok(c) => c,
|
||||
Err(e) => {
|
||||
tracing::warn!(error = %e, "failed to accept incoming connection");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let store = Arc::clone(&store);
|
||||
let waiters = Arc::clone(&waiters);
|
||||
let auth_cfg = Arc::clone(&auth_cfg);
|
||||
let opaque_setup = Arc::clone(&opaque_setup);
|
||||
let pending_logins = Arc::clone(&pending_logins);
|
||||
let sessions = Arc::clone(&sessions);
|
||||
let rate_limits = Arc::clone(&rate_limits);
|
||||
tokio::task::spawn_local(async move {
|
||||
if let Err(e) = handle_node_connection(
|
||||
connecting,
|
||||
store,
|
||||
waiters,
|
||||
auth_cfg,
|
||||
opaque_setup,
|
||||
pending_logins,
|
||||
sessions,
|
||||
rate_limits,
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "connection error");
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
_ = tokio::signal::ctrl_c() => {
|
||||
tracing::info!("shutdown signal received, draining QUIC connections");
|
||||
endpoint.close(0u32.into(), b"server shutdown");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok::<(), anyhow::Error>(())
|
||||
@@ -1393,6 +1509,7 @@ async fn main() -> anyhow::Result<()> {
|
||||
// ── Per-connection handlers ───────────────────────────────────────────────────
|
||||
|
||||
/// Handle one NodeService connection.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn handle_node_connection(
|
||||
connecting: quinn::Connecting,
|
||||
store: Arc<dyn Store>,
|
||||
@@ -1438,9 +1555,45 @@ fn fmt_hex(bytes: &[u8]) -> String {
|
||||
format!("{hex}…")
|
||||
}
|
||||
|
||||
fn validate_production_config(effective: &EffectiveConfig) -> anyhow::Result<()> {
|
||||
let token = effective
|
||||
.auth_token
|
||||
.as_deref()
|
||||
.filter(|s| !s.is_empty())
|
||||
.ok_or_else(|| {
|
||||
anyhow::anyhow!("production requires QUICNPROTOCHAT_AUTH_TOKEN (non-empty)")
|
||||
})?;
|
||||
if token == "devtoken" {
|
||||
anyhow::bail!(
|
||||
"production forbids auth_token 'devtoken'; set a strong QUICNPROTOCHAT_AUTH_TOKEN"
|
||||
);
|
||||
}
|
||||
if effective.store_backend == "sql" && effective.db_key.is_empty() {
|
||||
anyhow::bail!("production with store_backend=sql requires non-empty QUICNPROTOCHAT_DB_KEY");
|
||||
}
|
||||
if !effective.tls_cert.exists() || !effective.tls_key.exists() {
|
||||
anyhow::bail!(
|
||||
"production requires existing TLS cert and key (no auto-generation); provide QUICNPROTOCHAT_TLS_CERT and QUICNPROTOCHAT_TLS_KEY"
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Ensure a self-signed certificate exists on disk and return a QUIC server config.
|
||||
fn build_server_config(cert_path: &PathBuf, key_path: &PathBuf) -> anyhow::Result<ServerConfig> {
|
||||
/// When `production` is true, cert and key must already exist (no auto-generation).
|
||||
fn build_server_config(
|
||||
cert_path: &PathBuf,
|
||||
key_path: &PathBuf,
|
||||
production: bool,
|
||||
) -> anyhow::Result<ServerConfig> {
|
||||
if !cert_path.exists() || !key_path.exists() {
|
||||
if production {
|
||||
anyhow::bail!(
|
||||
"TLS cert or key missing at {:?} / {:?}; production mode forbids auto-generation",
|
||||
cert_path,
|
||||
key_path
|
||||
);
|
||||
}
|
||||
generate_self_signed_cert(cert_path, key_path)?;
|
||||
}
|
||||
|
||||
|
||||
@@ -13,6 +13,12 @@ pub struct SqlStore {
|
||||
}
|
||||
|
||||
impl SqlStore {
|
||||
fn lock_conn(&self) -> Result<std::sync::MutexGuard<'_, Connection>, StorageError> {
|
||||
self.conn
|
||||
.lock()
|
||||
.map_err(|e| StorageError::Db(format!("lock poisoned: {e}")))
|
||||
}
|
||||
|
||||
pub fn open(path: impl AsRef<Path>, key: &str) -> Result<Self, StorageError> {
|
||||
let conn = Connection::open(path).map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
@@ -36,7 +42,7 @@ impl SqlStore {
|
||||
}
|
||||
|
||||
fn migrate(&self) -> Result<(), StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute_batch(
|
||||
"CREATE TABLE IF NOT EXISTS key_packages (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
@@ -97,7 +103,7 @@ impl Store for SqlStore {
|
||||
identity_key: &[u8],
|
||||
package: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT INTO key_packages (identity_key, package_data) VALUES (?1, ?2)",
|
||||
params![identity_key, package],
|
||||
@@ -107,7 +113,7 @@ impl Store for SqlStore {
|
||||
}
|
||||
|
||||
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let conn = self.lock_conn()?;
|
||||
|
||||
let mut stmt = conn
|
||||
.prepare(
|
||||
@@ -141,7 +147,7 @@ impl Store for SqlStore {
|
||||
channel_id: &[u8],
|
||||
payload: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT INTO deliveries (recipient_key, channel_id, payload) VALUES (?1, ?2, ?3)",
|
||||
params![recipient_key, channel_id, payload],
|
||||
@@ -150,12 +156,8 @@ impl Store for SqlStore {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn fetch(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<Vec<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
fn fetch(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<Vec<Vec<u8>>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
|
||||
let mut stmt = conn
|
||||
.prepare(
|
||||
@@ -177,8 +179,10 @@ impl Store for SqlStore {
|
||||
let ids: Vec<i64> = rows.iter().map(|(id, _)| *id).collect();
|
||||
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
|
||||
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
|
||||
let params: Vec<&dyn rusqlite::types::ToSql> =
|
||||
ids.iter().map(|id| id as &dyn rusqlite::types::ToSql).collect();
|
||||
let params: Vec<&dyn rusqlite::types::ToSql> = ids
|
||||
.iter()
|
||||
.map(|id| id as &dyn rusqlite::types::ToSql)
|
||||
.collect();
|
||||
conn.execute(&sql, params.as_slice())
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
}
|
||||
@@ -192,7 +196,7 @@ impl Store for SqlStore {
|
||||
channel_id: &[u8],
|
||||
limit: usize,
|
||||
) -> Result<Vec<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let conn = self.lock_conn()?;
|
||||
|
||||
let mut stmt = conn
|
||||
.prepare(
|
||||
@@ -215,8 +219,10 @@ impl Store for SqlStore {
|
||||
let ids: Vec<i64> = rows.iter().map(|(id, _)| *id).collect();
|
||||
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
|
||||
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
|
||||
let params: Vec<&dyn rusqlite::types::ToSql> =
|
||||
ids.iter().map(|id| id as &dyn rusqlite::types::ToSql).collect();
|
||||
let params: Vec<&dyn rusqlite::types::ToSql> = ids
|
||||
.iter()
|
||||
.map(|id| id as &dyn rusqlite::types::ToSql)
|
||||
.collect();
|
||||
conn.execute(&sql, params.as_slice())
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
}
|
||||
@@ -224,12 +230,8 @@ impl Store for SqlStore {
|
||||
Ok(rows.into_iter().map(|(_, payload)| payload).collect())
|
||||
}
|
||||
|
||||
fn queue_depth(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<usize, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
let count: i64 = conn
|
||||
.query_row(
|
||||
"SELECT COUNT(*) FROM deliveries WHERE recipient_key = ?1 AND channel_id = ?2",
|
||||
@@ -241,7 +243,7 @@ impl Store for SqlStore {
|
||||
}
|
||||
|
||||
fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let conn = self.lock_conn()?;
|
||||
let cutoff = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
@@ -261,7 +263,7 @@ impl Store for SqlStore {
|
||||
identity_key: &[u8],
|
||||
hybrid_pk: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO hybrid_keys (identity_key, hybrid_public_key) VALUES (?1, ?2)",
|
||||
params![identity_key, hybrid_pk],
|
||||
@@ -271,7 +273,7 @@ impl Store for SqlStore {
|
||||
}
|
||||
|
||||
fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let conn = self.lock_conn()?;
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT hybrid_public_key FROM hybrid_keys WHERE identity_key = ?1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
@@ -282,7 +284,7 @@ impl Store for SqlStore {
|
||||
}
|
||||
|
||||
fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO server_setup (id, setup_data) VALUES (1, ?1)",
|
||||
params![setup],
|
||||
@@ -292,7 +294,7 @@ impl Store for SqlStore {
|
||||
}
|
||||
|
||||
fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let conn = self.lock_conn()?;
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT setup_data FROM server_setup WHERE id = 1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
@@ -303,7 +305,7 @@ impl Store for SqlStore {
|
||||
}
|
||||
|
||||
fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO users (username, opaque_record) VALUES (?1, ?2)",
|
||||
params![username, record],
|
||||
@@ -313,7 +315,7 @@ impl Store for SqlStore {
|
||||
}
|
||||
|
||||
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let conn = self.lock_conn()?;
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT opaque_record FROM users WHERE username = ?1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
@@ -324,7 +326,7 @@ impl Store for SqlStore {
|
||||
}
|
||||
|
||||
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let conn = self.lock_conn()?;
|
||||
let exists: bool = conn
|
||||
.query_row(
|
||||
"SELECT EXISTS(SELECT 1 FROM users WHERE username = ?1)",
|
||||
@@ -340,7 +342,7 @@ impl Store for SqlStore {
|
||||
username: &str,
|
||||
identity_key: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO user_identity_keys (username, identity_key) VALUES (?1, ?2)",
|
||||
params![username, identity_key],
|
||||
@@ -350,7 +352,7 @@ impl Store for SqlStore {
|
||||
}
|
||||
|
||||
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let conn = self.lock_conn()?;
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT identity_key FROM user_identity_keys WHERE username = ?1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
@@ -365,7 +367,7 @@ impl Store for SqlStore {
|
||||
identity_key: &[u8],
|
||||
node_addr: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO endpoints (identity_key, node_addr) VALUES (?1, ?2)",
|
||||
params![identity_key, node_addr],
|
||||
@@ -375,7 +377,7 @@ impl Store for SqlStore {
|
||||
}
|
||||
|
||||
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let conn = self.lock_conn()?;
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT node_addr FROM endpoints WHERE identity_key = ?1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
@@ -481,7 +483,9 @@ mod tests {
|
||||
fn has_user_record_check() {
|
||||
let store = open_in_memory();
|
||||
assert!(!store.has_user_record("alice").unwrap());
|
||||
store.store_user_record("alice", b"record".to_vec()).unwrap();
|
||||
store
|
||||
.store_user_record("alice", b"record".to_vec())
|
||||
.unwrap();
|
||||
assert!(store.has_user_record("alice").unwrap());
|
||||
assert!(!store.has_user_record("bob").unwrap());
|
||||
}
|
||||
@@ -490,8 +494,13 @@ mod tests {
|
||||
fn user_identity_key_round_trip() {
|
||||
let store = open_in_memory();
|
||||
assert!(store.get_user_identity_key("alice").unwrap().is_none());
|
||||
store.store_user_identity_key("alice", vec![1u8; 32]).unwrap();
|
||||
assert_eq!(store.get_user_identity_key("alice").unwrap(), Some(vec![1u8; 32]));
|
||||
store
|
||||
.store_user_identity_key("alice", vec![1u8; 32])
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
store.get_user_identity_key("alice").unwrap(),
|
||||
Some(vec![1u8; 32])
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -18,15 +18,17 @@ pub enum StorageError {
|
||||
Db(String),
|
||||
}
|
||||
|
||||
fn lock<T>(m: &Mutex<T>) -> Result<std::sync::MutexGuard<'_, T>, StorageError> {
|
||||
m.lock()
|
||||
.map_err(|e| StorageError::Io(format!("lock poisoned: {e}")))
|
||||
}
|
||||
|
||||
// ── Store trait ──────────────────────────────────────────────────────────────
|
||||
|
||||
/// Abstraction over storage backends (file-backed, SQLCipher, etc.).
|
||||
pub trait Store: Send + Sync {
|
||||
fn upload_key_package(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
package: Vec<u8>,
|
||||
) -> Result<(), StorageError>;
|
||||
fn upload_key_package(&self, identity_key: &[u8], package: Vec<u8>)
|
||||
-> Result<(), StorageError>;
|
||||
|
||||
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
|
||||
|
||||
@@ -37,11 +39,7 @@ pub trait Store: Send + Sync {
|
||||
payload: Vec<u8>,
|
||||
) -> Result<(), StorageError>;
|
||||
|
||||
fn fetch(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<Vec<Vec<u8>>, StorageError>;
|
||||
fn fetch(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<Vec<Vec<u8>>, StorageError>;
|
||||
|
||||
/// Fetch up to `limit` messages without draining the entire queue (Fix 8).
|
||||
fn fetch_limited(
|
||||
@@ -52,11 +50,7 @@ pub trait Store: Send + Sync {
|
||||
) -> Result<Vec<Vec<u8>>, StorageError>;
|
||||
|
||||
/// Return the number of queued messages for (recipient, channel) (Fix 7).
|
||||
fn queue_depth(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<usize, StorageError>;
|
||||
fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError>;
|
||||
|
||||
/// Delete messages older than `max_age_secs`. Returns count deleted (Fix 7).
|
||||
fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError>;
|
||||
@@ -95,11 +89,8 @@ pub trait Store: Send + Sync {
|
||||
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;
|
||||
|
||||
/// Publish a P2P endpoint address for an identity key.
|
||||
fn publish_endpoint(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
node_addr: Vec<u8>,
|
||||
) -> Result<(), StorageError>;
|
||||
fn publish_endpoint(&self, identity_key: &[u8], node_addr: Vec<u8>)
|
||||
-> Result<(), StorageError>;
|
||||
|
||||
/// Resolve a peer's P2P endpoint address.
|
||||
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
|
||||
@@ -210,7 +201,9 @@ impl FileBackedStore {
|
||||
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
||||
}
|
||||
|
||||
fn load_delivery_map(path: &Path) -> Result<HashMap<ChannelKey, VecDeque<Vec<u8>>>, StorageError> {
|
||||
fn load_delivery_map(
|
||||
path: &Path,
|
||||
) -> Result<HashMap<ChannelKey, VecDeque<Vec<u8>>>, StorageError> {
|
||||
if !path.exists() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
@@ -218,22 +211,9 @@ impl FileBackedStore {
|
||||
if bytes.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
// Try v2 format (channel-aware). Fallback to legacy v1 for upgrade.
|
||||
if let Ok(map) = bincode::deserialize::<QueueMapV2>(&bytes) {
|
||||
return Ok(map.map);
|
||||
}
|
||||
let legacy: QueueMapV1 = bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)?;
|
||||
let mut upgraded = HashMap::new();
|
||||
for (recipient_key, queue) in legacy.map.into_iter() {
|
||||
upgraded.insert(
|
||||
ChannelKey {
|
||||
channel_id: Vec::new(),
|
||||
recipient_key,
|
||||
},
|
||||
queue,
|
||||
);
|
||||
}
|
||||
Ok(upgraded)
|
||||
bincode::deserialize::<QueueMapV2>(&bytes)
|
||||
.map(|v| v.map)
|
||||
.map_err(|_| StorageError::Io("deliveries file: v1 format no longer supported; delete or migrate".into()))
|
||||
}
|
||||
|
||||
fn flush_delivery_map(
|
||||
@@ -283,11 +263,7 @@ impl FileBackedStore {
|
||||
bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)
|
||||
}
|
||||
|
||||
fn flush_users(
|
||||
&self,
|
||||
path: &Path,
|
||||
map: &HashMap<String, Vec<u8>>,
|
||||
) -> Result<(), StorageError> {
|
||||
fn flush_users(&self, path: &Path, map: &HashMap<String, Vec<u8>>) -> Result<(), StorageError> {
|
||||
let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
@@ -314,7 +290,7 @@ impl Store for FileBackedStore {
|
||||
identity_key: &[u8],
|
||||
package: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let mut map = self.key_packages.lock().unwrap();
|
||||
let mut map = lock(&self.key_packages)?;
|
||||
map.entry(identity_key.to_vec())
|
||||
.or_default()
|
||||
.push_back(package);
|
||||
@@ -322,7 +298,7 @@ impl Store for FileBackedStore {
|
||||
}
|
||||
|
||||
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let mut map = self.key_packages.lock().unwrap();
|
||||
let mut map = lock(&self.key_packages)?;
|
||||
let package = map.get_mut(identity_key).and_then(|q| q.pop_front());
|
||||
self.flush_kp_map(&self.kp_path, &*map)?;
|
||||
Ok(package)
|
||||
@@ -334,23 +310,17 @@ impl Store for FileBackedStore {
|
||||
channel_id: &[u8],
|
||||
payload: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let mut map = self.deliveries.lock().unwrap();
|
||||
let mut map = lock(&self.deliveries)?;
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
};
|
||||
map.entry(key)
|
||||
.or_default()
|
||||
.push_back(payload);
|
||||
map.entry(key).or_default().push_back(payload);
|
||||
self.flush_delivery_map(&self.ds_path, &*map)
|
||||
}
|
||||
|
||||
fn fetch(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<Vec<Vec<u8>>, StorageError> {
|
||||
let mut map = self.deliveries.lock().unwrap();
|
||||
fn fetch(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<Vec<Vec<u8>>, StorageError> {
|
||||
let mut map = lock(&self.deliveries)?;
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
@@ -369,7 +339,7 @@ impl Store for FileBackedStore {
|
||||
channel_id: &[u8],
|
||||
limit: usize,
|
||||
) -> Result<Vec<Vec<u8>>, StorageError> {
|
||||
let mut map = self.deliveries.lock().unwrap();
|
||||
let mut map = lock(&self.deliveries)?;
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
@@ -385,12 +355,8 @@ impl Store for FileBackedStore {
|
||||
Ok(messages)
|
||||
}
|
||||
|
||||
fn queue_depth(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<usize, StorageError> {
|
||||
let map = self.deliveries.lock().unwrap();
|
||||
fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError> {
|
||||
let map = lock(&self.deliveries)?;
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
@@ -408,13 +374,13 @@ impl Store for FileBackedStore {
|
||||
identity_key: &[u8],
|
||||
hybrid_pk: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let mut map = self.hybrid_keys.lock().unwrap();
|
||||
let mut map = lock(&self.hybrid_keys)?;
|
||||
map.insert(identity_key.to_vec(), hybrid_pk);
|
||||
self.flush_hybrid_keys(&self.hk_path, &*map)
|
||||
}
|
||||
|
||||
fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let map = self.hybrid_keys.lock().unwrap();
|
||||
let map = lock(&self.hybrid_keys)?;
|
||||
Ok(map.get(identity_key).cloned())
|
||||
}
|
||||
|
||||
@@ -437,18 +403,18 @@ impl Store for FileBackedStore {
|
||||
}
|
||||
|
||||
fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
|
||||
let mut map = self.users.lock().unwrap();
|
||||
let mut map = lock(&self.users)?;
|
||||
map.insert(username.to_string(), record);
|
||||
self.flush_users(&self.users_path, &*map)
|
||||
}
|
||||
|
||||
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let map = self.users.lock().unwrap();
|
||||
let map = lock(&self.users)?;
|
||||
Ok(map.get(username).cloned())
|
||||
}
|
||||
|
||||
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
|
||||
let map = self.users.lock().unwrap();
|
||||
let map = lock(&self.users)?;
|
||||
Ok(map.contains_key(username))
|
||||
}
|
||||
|
||||
@@ -457,13 +423,13 @@ impl Store for FileBackedStore {
|
||||
username: &str,
|
||||
identity_key: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let mut map = self.identity_keys.lock().unwrap();
|
||||
let mut map = lock(&self.identity_keys)?;
|
||||
map.insert(username.to_string(), identity_key);
|
||||
self.flush_map_string_bytes(&self.identity_keys_path, &*map)
|
||||
}
|
||||
|
||||
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let map = self.identity_keys.lock().unwrap();
|
||||
let map = lock(&self.identity_keys)?;
|
||||
Ok(map.get(username).cloned())
|
||||
}
|
||||
|
||||
@@ -472,13 +438,13 @@ impl Store for FileBackedStore {
|
||||
identity_key: &[u8],
|
||||
node_addr: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let mut map = self.endpoints.lock().unwrap();
|
||||
let mut map = lock(&self.endpoints)?;
|
||||
map.insert(identity_key.to_vec(), node_addr);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let map = self.endpoints.lock().unwrap();
|
||||
let map = lock(&self.endpoints)?;
|
||||
Ok(map.get(identity_key).cloned())
|
||||
}
|
||||
}
|
||||
|
||||
29
deny.toml
Normal file
29
deny.toml
Normal file
@@ -0,0 +1,29 @@
|
||||
# cargo-deny configuration for license compatibility and duplicate detection.
|
||||
# See https://embarkstudios.github.io/cargo-deny/
|
||||
|
||||
[advisories]
|
||||
db-path = "~/.cargo/advisory-db"
|
||||
db-urls = ["https://github.com/rustsec/advisory-db"]
|
||||
vulnerability = "deny"
|
||||
unmaintained = "warn"
|
||||
yanked = "deny"
|
||||
notice = "warn"
|
||||
|
||||
[bans]
|
||||
multiple-versions = "warn"
|
||||
wildcards = "allow"
|
||||
highlight = "all"
|
||||
|
||||
[licenses]
|
||||
unlicensed = "deny"
|
||||
allow = ["MIT", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause"]
|
||||
deny = []
|
||||
copyleft = "warn"
|
||||
default = "deny"
|
||||
allow-osi-fsf-free = "both"
|
||||
|
||||
[sources]
|
||||
unknown-registry = "deny"
|
||||
unknown-git = "deny"
|
||||
allow-registry = ["https://github.com/rust-lang/crates.io-index"]
|
||||
allow-git = []
|
||||
@@ -16,6 +16,7 @@ COPY crates/quicnprotochat-core/Cargo.toml crates/quicnprotochat-core/Cargo.to
|
||||
COPY crates/quicnprotochat-proto/Cargo.toml crates/quicnprotochat-proto/Cargo.toml
|
||||
COPY crates/quicnprotochat-server/Cargo.toml crates/quicnprotochat-server/Cargo.toml
|
||||
COPY crates/quicnprotochat-client/Cargo.toml crates/quicnprotochat-client/Cargo.toml
|
||||
COPY crates/quicnprotochat-p2p/Cargo.toml crates/quicnprotochat-p2p/Cargo.toml
|
||||
|
||||
# Create dummy source files so `cargo build` can resolve the dependency graph
|
||||
# and cache the compiled dependencies before copying real source.
|
||||
@@ -24,10 +25,12 @@ RUN mkdir -p \
|
||||
crates/quicnprotochat-proto/src \
|
||||
crates/quicnprotochat-server/src \
|
||||
crates/quicnprotochat-client/src \
|
||||
crates/quicnprotochat-p2p/src \
|
||||
&& echo 'fn main() {}' > crates/quicnprotochat-server/src/main.rs \
|
||||
&& echo 'fn main() {}' > crates/quicnprotochat-client/src/main.rs \
|
||||
&& touch crates/quicnprotochat-core/src/lib.rs \
|
||||
&& touch crates/quicnprotochat-proto/src/lib.rs
|
||||
&& touch crates/quicnprotochat-proto/src/lib.rs \
|
||||
&& touch crates/quicnprotochat-p2p/src/lib.rs
|
||||
|
||||
# Schemas must exist before the proto crate's build.rs runs.
|
||||
COPY schemas/ schemas/
|
||||
@@ -38,10 +41,11 @@ RUN cargo build --release --bin quicnprotochat-server 2>/dev/null || true
|
||||
# Copy real source and build for real.
|
||||
COPY crates/ crates/
|
||||
|
||||
# Touch main.rs files to force re-compilation of the binary crates.
|
||||
# Touch source to force re-compilation after copying real crates.
|
||||
RUN touch \
|
||||
crates/quicnprotochat-core/src/lib.rs \
|
||||
crates/quicnprotochat-proto/src/lib.rs \
|
||||
crates/quicnprotochat-p2p/src/lib.rs \
|
||||
crates/quicnprotochat-server/src/main.rs \
|
||||
crates/quicnprotochat-client/src/main.rs
|
||||
|
||||
|
||||
127
docs/PRODUCTION-READINESS-AUDIT.md
Normal file
127
docs/PRODUCTION-READINESS-AUDIT.md
Normal file
@@ -0,0 +1,127 @@
|
||||
# Production Readiness Audit
|
||||
|
||||
This document summarizes issues and fixes needed to get quicnprotochat production-ready, based on a codebase review. It aligns with the existing [Production Readiness WBS](src/roadmap/production-readiness.md) and [Coding Standards](src/contributing/coding-standards.md).
|
||||
|
||||
---
|
||||
|
||||
## Critical (fix before production)
|
||||
|
||||
### 1. **Auth token and dev defaults**
|
||||
|
||||
- **README and example config** use `auth_token = "devtoken"` and `db_key = ""`.
|
||||
- **Risk:** Deploying with default/example config allows weak or no auth and unencrypted DB.
|
||||
- **Fix:** Require explicit `QUICNPROTOCHAT_AUTH_TOKEN` (or config) in production; reject empty or `"devtoken"` when a production mode/env is set. Document that `db_key` empty disables SQLCipher and is not acceptable for production.
|
||||
|
||||
### 2. **Database encryption optional**
|
||||
|
||||
- **`sql_store.rs`:** If `db_key` is empty, SQLCipher is not applied; DB is plaintext on disk.
|
||||
- **Fix:** In production, require non-empty `db_key` (or fail startup with a clear error). Document in README and deployment docs.
|
||||
|
||||
### 3. **Secrets and generated files not ignored**
|
||||
|
||||
- **`.gitignore`** does not include `data/`, so `data/server-cert.der`, `data/server-key.der`, and `data/quicnprotochat.db` could be committed.
|
||||
- **Fix:** Add `data/` (and any other dirs that hold certs, keys, or DBs) to `.gitignore`. Consider adding `*.der` and `*.db` if used only for local/dev.
|
||||
|
||||
### 4. **Dockerfile out of sync with workspace**
|
||||
|
||||
- **Workspace** has 5 members including `crates/quicnprotochat-p2p`.
|
||||
- **Dockerfile** only copies 4 crate manifests and creates stub dirs for those 4; it never copies `quicnprotochat-p2p`.
|
||||
- **Result:** `cargo build --release --bin quicnprotochat-server` can fail (missing workspace member) or behave inconsistently.
|
||||
- **Fix:** Add `COPY crates/quicnprotochat-p2p/Cargo.toml` and a stub `crates/quicnprotochat-p2p/src` (or equivalent) in the dependency-cache layer so the workspace resolves. Ensure the final `COPY crates/ crates/` still brings in real p2p source.
|
||||
|
||||
### 5. **E2E test failing (rustls CryptoProvider)**
|
||||
|
||||
- **Symptom:** `e2e_happy_path_register_invite_join_send_recv` panics: *"Could not automatically determine the process-level CryptoProvider"*.
|
||||
- **Cause:** rustls 0.23 requires a default `CryptoProvider` (e.g. `ring` or `aws-lc-rs`). In the test process, nothing calls `CryptoProvider::install_default()` before the client uses QUIC/rustls.
|
||||
- **Fix:** In the E2E test (or in a shared test harness), call `rustls::crypto::ring::default_provider().install_default().ok()` (or the chosen provider) once at process start before any QUIC/rustls usage. Ensure the crate has exactly one of the `ring` / `aws-lc-rs` features so the default is unambiguous.
|
||||
|
||||
---
|
||||
|
||||
## High (security and reliability)
|
||||
|
||||
### 6. **Panic risk in client RPC path**
|
||||
|
||||
- **`quicnprotochat-client/src/lib.rs`:** `set_auth()` uses `.expect("init_auth must be called with a non-empty token before RPCs")`. If RPC is called without `init_auth`, the process panics.
|
||||
- **Fix:** Replace with a `Result` or an error return (e.g. a dedicated error type) so callers get a recoverable error instead of a panic. Document that `init_auth` must be called before RPCs.
|
||||
|
||||
### 7. **Mutex `.unwrap()` in production paths**
|
||||
|
||||
- **`sql_store.rs`:** All `self.conn.lock().unwrap()` calls can panic if the mutex is poisoned.
|
||||
- **`storage.rs` (file backend):** Same pattern with `.lock().unwrap()` on shared maps.
|
||||
- **Coding standards:** Prefer handling `Result` from `lock()` (e.g. `lock().map_err(...)?`) or use a type that encapsulates poisoning so production paths don’t panic on contention/poison.
|
||||
|
||||
### 8. **`unwrap()` in client library**
|
||||
|
||||
- **`lib.rs`:** `"0.0.0.0:0".parse().unwrap()` for the client endpoint. If parsing ever changed or failed, this would panic.
|
||||
- **Fix:** Use `.context("parse client bind address")?` (or equivalent) so this is a proper error path.
|
||||
|
||||
### 9. **TLS certificate generation is silent on first run**
|
||||
|
||||
- **Server** auto-generates a self-signed cert if files are missing. Production readiness WBS says: *"Self-signed certificates acceptable for development; production deployments must use a CA-signed certificate or certificate pinning."*
|
||||
- **Fix:** Add a startup check (e.g. env or config flag) that in production rejects auto-generation and requires existing cert/key paths. Log clearly when running with self-signed certs so operators know they’re in dev mode.
|
||||
|
||||
---
|
||||
|
||||
## Medium (hygiene and ops)
|
||||
|
||||
### 10. **No CI pipeline**
|
||||
|
||||
- **Production Readiness WBS** expects: GitHub Actions with `cargo test --workspace`, `cargo clippy`, `cargo fmt --check`, `cargo deny check`.
|
||||
- **Current state:** No `.github/workflows` (or equivalent) found.
|
||||
- **Fix:** Add a CI workflow that runs tests, clippy, fmt, and deny so every PR is validated.
|
||||
|
||||
### 11. **No CODEOWNERS**
|
||||
|
||||
- WBS requires CODEOWNERS for review ownership and security-sensitive changes.
|
||||
- **Fix:** Add `.github/CODEOWNERS` mapping crates to owners.
|
||||
|
||||
### 12. **No dependency audit in CI**
|
||||
|
||||
- WBS mentions `cargo audit` in CI.
|
||||
- **Fix:** Add a CI job that runs `cargo audit` (and optionally `cargo deny check`) and fails on known vulnerabilities or policy violations.
|
||||
|
||||
### 13. **No `deny.toml` / `deny.toml` config**
|
||||
|
||||
- Coding standards reference `cargo deny check`; no config file was found.
|
||||
- **Fix:** Add `deny.toml` (or equivalent) and run `cargo deny check` in CI.
|
||||
|
||||
### 14. **Warnings in build**
|
||||
|
||||
- **Cap'n Proto generated code:** `unused_parens` in generated `.rs` files. Standards allow `#[allow(...)]` on generated code; consider suppressing in the codegen output or in the crate that includes it.
|
||||
- **Server:** `SessionInfo` has `username` and `identity_key` never read (dead code). Either use them (e.g. audit logging) or remove/allow with a short comment.
|
||||
- **E2E test:** Deprecated `cargo_bin`, `unused_mut`; trivial to fix.
|
||||
- **openmls:** Future-incompat warning; track upstream and plan upgrade.
|
||||
|
||||
### 15. **Docker image runs as `nobody`**
|
||||
|
||||
- **Dockerfile** uses `USER nobody`. Good for not running as root, but `nobody` may not have a writable home or data dir.
|
||||
- **Fix:** Ensure `QUICNPROTOCHAT_DATA_DIR` (and cert paths) point to a directory writable by `nobody`, or create a dedicated user/group with a known UID and use that in the Dockerfile and docs.
|
||||
|
||||
---
|
||||
|
||||
## Already in good shape
|
||||
|
||||
- **Auth token comparison:** Uses `subtle::ConstantTimeEq` (`ct_eq`) for the static token — good.
|
||||
- **Input validation:** Recipient key length (32), payload size (5 MB), wire version, rate limiting, queue depth — present and consistent.
|
||||
- **Structured logging:** `tracing` with env filter; no secret material in log messages in the reviewed paths.
|
||||
- **Error handling:** RPC handlers return coded errors; no `unwrap()` on crypto in server RPC paths.
|
||||
- **Health endpoint:** Server exposes a health RPC used by E2E and can be used for readiness probes.
|
||||
|
||||
---
|
||||
|
||||
## Summary checklist
|
||||
|
||||
| Area | Status | Action |
|
||||
|-------------------|----------|--------|
|
||||
| Auth / tokens | Fix | Require strong auth in prod; document devtoken / empty db_key |
|
||||
| DB encryption | Fix | Require non-empty db_key in production |
|
||||
| .gitignore | Fix | Add `data/` (and cert/DB patterns as needed) |
|
||||
| Dockerfile | Fix | Include p2p crate in workspace build |
|
||||
| E2E test | Fix | Set rustls CryptoProvider in test harness |
|
||||
| Client panic | Improve | Replace expect with Result in set_auth |
|
||||
| Mutex unwrap | Improve | Handle poison or use non-panicking API |
|
||||
| TLS in production| Improve | Reject auto-generated cert in prod mode |
|
||||
| CI / CODEOWNERS | Add | GitHub Actions, deny, audit, CODEOWNERS |
|
||||
| Warnings | Clean up | Dead code, deprecated APIs, generated allows |
|
||||
|
||||
This audit should be revisited after implementing Phase 1–2 of the [Production Readiness WBS](src/roadmap/production-readiness.md) and before any production deployment.
|
||||
@@ -4,6 +4,13 @@
|
||||
|
||||
---
|
||||
|
||||
# Why quicnprotochat?
|
||||
|
||||
- [Comparison with Classical Chat Protocols](design-rationale/protocol-comparison.md)
|
||||
- [Why This Design, Not Signal/Matrix/...](design-rationale/why-not-signal.md)
|
||||
|
||||
---
|
||||
|
||||
# Getting Started
|
||||
|
||||
- [Prerequisites](getting-started/prerequisites.md)
|
||||
@@ -60,7 +67,6 @@
|
||||
# Design Rationale
|
||||
|
||||
- [Design Decisions Overview](design-rationale/overview.md)
|
||||
- [Why This Design, Not Signal/Matrix/...](design-rationale/why-not-signal.md)
|
||||
- [ADR-002: Cap'n Proto over MessagePack](design-rationale/adr-002-capnproto.md)
|
||||
- [ADR-004: MLS-Unaware Delivery Service](design-rationale/adr-004-mls-unaware-ds.md)
|
||||
- [ADR-005: Single-Use KeyPackages](design-rationale/adr-005-single-use-keypackages.md)
|
||||
@@ -80,6 +86,7 @@
|
||||
# Roadmap and Research
|
||||
|
||||
- [Milestone Tracker](roadmap/milestones.md)
|
||||
- [Phase 2 + M4–M6 Roadmap](roadmap/phase2-and-m4-m6.md)
|
||||
- [Production Readiness WBS](roadmap/production-readiness.md)
|
||||
- [Auth, Devices, and Tokens](roadmap/authz-plan.md)
|
||||
- [1:1 Channel Design](roadmap/dm-channels.md)
|
||||
|
||||
524
docs/src/design-rationale/protocol-comparison.md
Normal file
524
docs/src/design-rationale/protocol-comparison.md
Normal file
@@ -0,0 +1,524 @@
|
||||
# Comparison with Classical Chat Protocols
|
||||
|
||||
This page compares quicnprotochat against **classical and legacy chat protocols** -- IRC+SSL, XMPP (with and without OMEMO), Telegram's MTProto, and plain TCP/TLS chat systems -- to demonstrate what a modern, cryptographically rigorous design provides over protocols that were designed before end-to-end encryption, post-compromise security, and post-quantum readiness were practical concerns.
|
||||
|
||||
For a comparison against modern E2E-encrypted protocols (Signal, Matrix/Olm/Megolm), see [Why This Design, Not Signal/Matrix/...](why-not-signal.md).
|
||||
|
||||
---
|
||||
|
||||
## At a glance
|
||||
|
||||
```
|
||||
Classical IRC+SSL quicnprotochat
|
||||
───────────────── ──────────────
|
||||
|
||||
You ──TLS──▶ Server ──TLS──▶ Bob You ──QUIC/TLS──▶ Server ──QUIC/TLS──▶ Bob
|
||||
│ │
|
||||
reads your sees only opaque
|
||||
plaintext MLS ciphertext
|
||||
messages (cannot decrypt)
|
||||
```
|
||||
|
||||
The fundamental difference: **classical protocols trust the server with your plaintext**. quicnprotochat's server is cryptographically excluded from reading message content.
|
||||
|
||||
---
|
||||
|
||||
## Protocol comparison matrix
|
||||
|
||||
| Property | IRC+SSL | XMPP+TLS | XMPP+OMEMO | Telegram (MTProto) | quicnprotochat |
|
||||
|---|---|---|---|---|---|
|
||||
| **Transport encryption** | TLS (server-to-server optional) | STARTTLS / direct TLS | STARTTLS / direct TLS | MTProto 2.0 (custom) | QUIC + TLS 1.3 |
|
||||
| **End-to-end encryption** | None | None | Double Ratchet (1:1) | "Secret chats" only (1:1) | MLS RFC 9420 (groups native) |
|
||||
| **Group E2E encryption** | None | None | Partial (OMEMO group) | None (cloud chats) | MLS ratchet tree |
|
||||
| **Forward secrecy** | TLS session only | TLS session only | Yes (Double Ratchet) | Secret chats only | Yes (MLS epoch ratchet + TLS) |
|
||||
| **Post-compromise security** | None | None | None (groups) | None | Yes (MLS Update proposals) |
|
||||
| **Server sees plaintext** | Yes | Yes | No (1:1); partial (groups) | Yes (cloud chats) | Never |
|
||||
| **Post-quantum readiness** | None | None | None | None | Hybrid KEM (X25519 + ML-KEM-768) |
|
||||
| **Group operation cost** | N/A (no E2E) | N/A (no E2E) | O(n) per member | N/A (no group E2E) | O(log n) via ratchet tree |
|
||||
| **Wire format** | Text (RFC 1459) | XML | XML + Protobuf | TL (Type Language) | Cap'n Proto (zero-copy) |
|
||||
| **Standardization** | RFC 1459 / RFC 2812 | RFC 6120 / 6121 | XEP-0384 | Proprietary | IETF RFC 9420 (MLS) |
|
||||
| **Authentication** | SASL / NickServ | SASL / TLS client certs | SASL + device fingerprints | Phone number + SMS | OPAQUE PAKE (password never leaves client) |
|
||||
|
||||
---
|
||||
|
||||
## Deep dive: IRC+SSL vs. quicnprotochat
|
||||
|
||||
IRC (Internet Relay Chat) is the archetypal chat protocol, designed in 1988. Adding SSL/TLS wraps the TCP connection in transport encryption, but the protocol's security model remains fundamentally unchanged.
|
||||
|
||||
### What happens when Alice sends a message on IRC+SSL
|
||||
|
||||
```
|
||||
┌───────┐ ┌──────────┐ ┌──────────┐ ┌─────┐
|
||||
│ Alice │──TLS───▶│ Server A │──plain──▶│ Server B │──TLS───▶│ Bob │
|
||||
└───────┘ └──────────┘ └──────────┘ └─────┘
|
||||
│ │
|
||||
Sees: "PRIVMSG Sees: "PRIVMSG
|
||||
#secret :hey Bob, #secret :hey Bob,
|
||||
the password is the password is
|
||||
hunter2" hunter2"
|
||||
```
|
||||
|
||||
**Problems:**
|
||||
|
||||
1. **Server reads all plaintext.** The IRC server receives, parses, and forwards every message in cleartext. TLS only protects the client-to-server hop.
|
||||
2. **Server-to-server links may be unencrypted.** IRC federation uses inter-server links that historically lack TLS. Even with modern IRCd configurations, each server in the network sees every message.
|
||||
3. **No forward secrecy beyond TLS session.** If a server's TLS private key is compromised, a passive attacker who recorded past traffic can decrypt all historical sessions (unless ECDHE was negotiated).
|
||||
4. **No post-compromise security.** There is no mechanism to recover from a key compromise. If a server is breached, all messages flowing through it are exposed indefinitely.
|
||||
5. **No identity binding.** NickServ password authentication is plaintext over the IRC protocol (inside TLS, but visible to the server). There is no cryptographic binding between a user's identity and their messages.
|
||||
|
||||
### What happens when Alice sends a message on quicnprotochat
|
||||
|
||||
```
|
||||
┌───────┐ ┌────────┐ ┌─────┐
|
||||
│ Alice │──QUIC/TLS 1.3─────▶│ Server │──QUIC/TLS 1.3─────▶│ Bob │
|
||||
└───────┘ └────────┘ └─────┘
|
||||
│ │ │
|
||||
│ MLS encrypt( │ Sees only: │ MLS decrypt(
|
||||
│ epoch_key, │ 0x8a3f...c7b2 │ epoch_key,
|
||||
│ "hey Bob, │ (opaque blob, │ ciphertext
|
||||
│ the password │ cannot decrypt) │ ) → "hey Bob,
|
||||
│ is hunter2" │ │ the password
|
||||
│ ) → 0x8a3f...c7b2 │ │ is hunter2"
|
||||
│ │ │
|
||||
│ ◄── epoch advances ──► │ │
|
||||
│ old keys deleted │ │ old keys deleted
|
||||
│ (forward secrecy) │ │ (forward secrecy)
|
||||
```
|
||||
|
||||
**Key differences:**
|
||||
|
||||
- The server handles only **opaque ciphertext**. It cannot decrypt, modify, or selectively censor messages.
|
||||
- Each MLS epoch derives fresh keys. Past epoch keys are **deleted** -- even if the server is fully compromised, historical messages remain encrypted.
|
||||
- If Alice's device is compromised at epoch *n*, a single Update proposal heals the ratchet tree. Messages after epoch *n+1* are protected (**post-compromise security**).
|
||||
|
||||
---
|
||||
|
||||
## Deep dive: XMPP+OMEMO vs. quicnprotochat
|
||||
|
||||
XMPP with OMEMO (XEP-0384) adds end-to-end encryption via the Signal Double Ratchet protocol. This is a significant improvement over plain XMPP, but OMEMO inherits the limitations of the Signal Protocol for group messaging.
|
||||
|
||||
### Group messaging comparison
|
||||
|
||||
```
|
||||
XMPP + OMEMO group (4 members)
|
||||
|
||||
Alice encrypts separately for each member:
|
||||
┌───────┐ ── encrypt(Bob_key) ──────▶ Bob
|
||||
│ Alice │ ── encrypt(Carol_key) ────▶ Carol
|
||||
└───────┘ ── encrypt(Dave_key) ─────▶ Dave
|
||||
3 encryptions per message
|
||||
O(n) cost per send
|
||||
|
||||
quicnprotochat MLS group (4 members)
|
||||
|
||||
Alice encrypts once with group epoch key:
|
||||
┌───────┐ ── MLS_encrypt(epoch_key) ──▶ Server
|
||||
│ Alice │ 1 encryption per message │
|
||||
└───────┘ O(1) cost per send ├──▶ Bob
|
||||
├──▶ Carol
|
||||
└──▶ Dave
|
||||
(all decrypt with same epoch key)
|
||||
```
|
||||
|
||||
| Property | XMPP+OMEMO groups | quicnprotochat MLS groups |
|---|---|---|
| **Encryption per message** | O(n) -- encrypt once per recipient | O(1) -- single MLS application message |
| **Add member** | O(n) -- distribute sender keys to all | O(log n) -- single MLS Commit |
| **Remove member** | O(n) -- rotate all sender keys | O(log n) -- single MLS Commit |
| **Post-compromise security** | No (sender keys have no PCS) | Yes (any member can issue Update) |
| **Group state consistency** | No formal guarantee | MLS transcript hash ensures all members see identical state |
| **Max practical group size** | ~100 (pairwise overhead) | Thousands (log-scaling ratchet tree) |
|
||||
|
||||
---
|
||||
|
||||
## Deep dive: Telegram (MTProto) vs. quicnprotochat
|
||||
|
||||
Telegram is often perceived as a "secure" messenger, but its default mode provides **no end-to-end encryption**. Only "Secret Chats" (1:1 only, not available on desktop) use E2E encryption.
|
||||
|
||||
### Telegram's two modes
|
||||
|
||||
```
|
||||
┌──────────────────────────────────────────────────────────────────┐
|
||||
│ Telegram Cloud Chats │
|
||||
│ (default, all platforms) │
|
||||
│ │
|
||||
│ You ──MTProto──▶ Telegram Server ──MTProto──▶ Recipient │
|
||||
│ │ │
|
||||
│ Server decrypts, │
|
||||
│ stores plaintext, │
|
||||
│ indexes for search, │
|
||||
│ processes for features │
|
||||
│ (synced across devices) │
|
||||
└──────────────────────────────────────────────────────────────────┘
|
||||
|
||||
┌──────────────────────────────────────────────────────────────────┐
|
||||
│ Telegram Secret Chats │
|
||||
│ (1:1 only, mobile only, opt-in) │
|
||||
│ │
|
||||
│ You ──DH key exchange──▶ Recipient │
|
||||
│ (no PCS, no FS beyond initial DH, │
|
||||
│ no group support, proprietary crypto) │
|
||||
└──────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### Comparison
|
||||
|
||||
| Property | Telegram Cloud Chats | Telegram Secret Chats | quicnprotochat |
|---|---|---|---|
| **Server reads plaintext** | Yes | No | No |
| **Group E2E** | No | N/A (1:1 only) | Yes (MLS) |
| **Forward secrecy** | None | Limited (no ratchet) | Full (MLS epoch ratchet) |
| **Post-compromise security** | None | None | Yes |
| **Cryptographic standard** | MTProto 2.0 (proprietary, custom) | MTProto 2.0 | IETF RFC 9420 (peer-reviewed) |
| **Open source server** | No | No | Yes (MIT license) |
| **Post-quantum** | None | None | Hybrid KEM (X25519 + ML-KEM-768) |
|
||||
|
||||
**Critical concern with Telegram:** MTProto is a custom, proprietary cryptographic protocol that has not undergone the same level of independent cryptographic review as standard protocols (TLS, MLS, Signal Protocol). Multiple academic papers have identified weaknesses in earlier versions. quicnprotochat exclusively uses IETF-standardized protocols (TLS 1.3, MLS RFC 9420) and widely reviewed cryptographic primitives.
|
||||
|
||||
---
|
||||
|
||||
## Practical attack scenarios
|
||||
|
||||
The following scenarios illustrate how the same attack plays out differently across protocol designs.
|
||||
|
||||
### Scenario 1: Server compromise
|
||||
|
||||
An attacker gains root access to the chat server.
|
||||
|
||||
```
|
||||
Attacker
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────────────────────┐
|
||||
│ Chat Server │
|
||||
├──────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ IRC+SSL: Full access to all messages. │
|
||||
│ Read history, impersonate users, │
|
||||
│ inject messages. │
|
||||
│ │
|
||||
│ XMPP+TLS: Full access to all messages. │
|
||||
│ Same as IRC. │
|
||||
│ │
|
||||
│ Telegram: Full access to cloud chat │
|
||||
│ plaintext. User photos, contacts, │
|
||||
│ message history all exposed. │
|
||||
│ │
|
||||
│ XMPP+OMEMO: Cannot read E2E messages, but │
|
||||
│ sees metadata (who talks to whom, │
|
||||
│ when, message sizes). │
|
||||
│ │
|
||||
│ quicnprotochat: │
|
||||
│ Cannot read messages (MLS E2E). │
|
||||
│ Sees metadata (recipient keys, │
|
||||
│ timing, sizes). │
|
||||
│ Cannot inject valid messages │
|
||||
│ (lacks MLS group keys). │
|
||||
│ Cannot impersonate users │
|
||||
│ (lacks Ed25519 private keys). │
|
||||
│ Past messages remain encrypted │
|
||||
│ (forward secrecy). │
|
||||
│ Future messages protected after │
|
||||
│ any member issues MLS Update │
|
||||
│ (post-compromise security). │
|
||||
│ │
|
||||
└──────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### Scenario 2: Harvest-now, decrypt-later (quantum threat)
|
||||
|
||||
A state-level adversary records all encrypted traffic today, planning to decrypt it with a future quantum computer.
|
||||
|
||||
```
|
||||
2025: Adversary passively records all ciphertext
|
||||
─────────────────────────────────────────────────
|
||||
|
||||
IRC+SSL (RSA/ECDHE):
|
||||
└── Quantum computer breaks ECDHE → all recorded sessions decrypted
|
||||
(and plaintext was already visible on the server anyway)
|
||||
|
||||
XMPP+OMEMO (X25519):
|
||||
└── Quantum computer breaks X25519 → all recorded E2E messages decrypted
|
||||
|
||||
Telegram (MTProto / custom DH):
|
||||
└── Quantum computer breaks DH → all recorded messages decrypted
|
||||
|
||||
quicnprotochat (Hybrid KEM):
|
||||
└── Transport: QUIC/TLS with ECDHE → quantum computer breaks this layer
|
||||
└── Inner layer: MLS content encrypted with group epoch keys
|
||||
└── Hybrid KEM envelope: X25519 + ML-KEM-768
|
||||
└── Quantum computer breaks X25519 ✓
|
||||
└── Quantum computer breaks ML-KEM-768 ✗ (NIST Level 3, ~192-bit PQ)
|
||||
└── Combined key: STILL SECURE (both must be broken)
|
||||
```
|
||||
|
||||
quicnprotochat's hybrid "belt and suspenders" design means that **even if X25519 falls to a quantum computer, ML-KEM-768 protects the content**. The adversary's recorded ciphertext remains useless.
|
||||
|
||||
### Scenario 3: Device theft / compromise
|
||||
|
||||
An attacker steals Alice's unlocked device and extracts her key material.
|
||||
|
||||
```
|
||||
After device compromise at time T:
|
||||
────────────────────────────────────
|
||||
|
||||
IRC+SSL:
|
||||
Messages before T: visible on server (no E2E)
|
||||
Messages after T: visible on server (no E2E)
|
||||
Recovery: change NickServ password (server-side only)
|
||||
|
||||
XMPP+OMEMO:
|
||||
Messages before T: protected (forward secrecy via Double Ratchet)
|
||||
Messages after T: exposed until sender key is rotated
|
||||
Group messages: no PCS -- attacker reads all future group messages
|
||||
until manual re-keying
|
||||
Recovery: manual device revocation + new sender keys
|
||||
|
||||
Telegram (cloud):
|
||||
Messages before T: all accessible (stored on server in plaintext)
|
||||
Messages after T: all accessible (cloud sync)
|
||||
Recovery: terminate session from another device
|
||||
|
||||
quicnprotochat:
|
||||
Messages before T: protected (MLS forward secrecy, past epoch keys deleted)
|
||||
Messages after T: exposed only until next MLS epoch advance
|
||||
Recovery: ANY group member issues an MLS Update proposal →
|
||||
new epoch key derived → attacker locked out
|
||||
(post-compromise security heals automatically)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Transport layer comparison
|
||||
|
||||
### Why QUIC over TCP
|
||||
|
||||
Classical protocols (IRC, XMPP) use TCP, which suffers from head-of-line (HOL) blocking. quicnprotochat uses QUIC, which provides independent streams over UDP.
|
||||
|
||||
```
|
||||
TCP (IRC/XMPP): all streams share one ordered byte stream
|
||||
─────────────────────────────────────────────────────────
|
||||
|
||||
Stream A: ████████░░░░████████████ (blocked waiting for
|
||||
Stream B: ░░░░░░░░░░░░████████████ lost packet in A)
|
||||
Stream C: ░░░░░░░░░░░░████████████
|
||||
|
||||
Lost packet ──▲
|
||||
in Stream A │
|
||||
└── ALL streams blocked until retransmit
|
||||
|
||||
|
||||
QUIC (quicnprotochat): each stream is independent
|
||||
──────────────────────────────────────────────────
|
||||
|
||||
Stream A: ████████░░██████████████ (only A waits)
|
||||
Stream B: ████████████████████████ (unaffected)
|
||||
Stream C: ████████████████████████ (unaffected)
|
||||
|
||||
Lost packet ──▲
|
||||
in Stream A │
|
||||
└── Only Stream A waits; B and C continue
|
||||
```
|
||||
|
||||
### Connection establishment
|
||||
|
||||
```
|
||||
IRC+SSL: TCP handshake (1 RTT) + TLS handshake (1-2 RTT) = 2-3 RTT
|
||||
──────────────────────────────────────────────────────────────────────
|
||||
Client ──SYN──▶ Server │
|
||||
Client ◀──SYN-ACK── Server │ TCP: 1 RTT
|
||||
Client ──ACK──▶ Server │
|
||||
Client ──ClientHello──▶ Server │
|
||||
Client ◀──ServerHello+Cert── Server │ TLS: 1-2 RTT
|
||||
Client ──Finished──▶ Server │
|
||||
════════════════════════════════════════════════════
|
||||
Total: 2-3 round trips before first message
|
||||
|
||||
quicnprotochat: QUIC integrates crypto into handshake = 1 RTT (or 0-RTT)
|
||||
──────────────────────────────────────────────────────────────────────────
|
||||
Client ──Initial(ClientHello)──▶ Server │
|
||||
Client ◀──Initial(ServerHello)── Server │ 1 RTT total
|
||||
Client ──Handshake(Finished)──▶ Server │
|
||||
════════════════════════════════════════════════════
|
||||
Total: 1 round trip (0-RTT with session resumption)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Authentication comparison
|
||||
|
||||
### How users prove identity
|
||||
|
||||
```
|
||||
IRC:
|
||||
NICK alice
|
||||
PASS hunter2 ← password sent in plaintext (inside TLS)
|
||||
(NickServ sees password) ← server stores/verifies password hash
|
||||
|
||||
XMPP:
|
||||
SASL PLAIN: base64(alice:hunter2) ← password sent to server
|
||||
(server verifies against stored hash)
|
||||
|
||||
Telegram:
|
||||
Phone number + SMS OTP ← carrier and Telegram see phone number
|
||||
(identity = phone number) ← no cryptographic identity
|
||||
|
||||
quicnprotochat (OPAQUE PAKE):
|
||||
Client ──blinded_element──▶ Server │ Server never sees password
|
||||
Client ◀──evaluated_element── Server │ Mutual authentication
|
||||
Client ──finalization──▶ Server │ Session key derived
|
||||
│ │
|
||||
└── password never leaves the client │
|
||||
server stores only an opaque │
|
||||
cryptographic record │
|
||||
(Argon2id + Ristretto255) │
|
||||
```
|
||||
|
||||
**OPAQUE** (an augmented PAKE that combines an Oblivious Pseudorandom Function (OPRF) with an authenticated key exchange) ensures that:
|
||||
|
||||
1. The server **never sees the plaintext password** -- not during registration, not during login.
|
||||
2. The server stores only a cryptographic record that cannot be used for offline dictionary attacks without the client's cooperation.
|
||||
3. **Argon2id** key stretching makes brute-force attacks memory-hard.
|
||||
4. The login protocol produces a mutually authenticated session key, not just a server-verified credential.
|
||||
|
||||
---
|
||||
|
||||
## Wire format efficiency
|
||||
|
||||
```
|
||||
IRC message (RFC 1459):
|
||||
┌──────────────────────────────────────────────────────────┐
|
||||
│ :alice!alice@host PRIVMSG #channel :Hello everyone\r\n │
|
||||
│ │
|
||||
│ 52 bytes for 14 bytes of payload ("Hello everyone") │
|
||||
│ Text parsing required. No schema. No type safety. │
|
||||
│ Ambiguous parsing rules (RFC 1459 vs RFC 2812 conflicts) │
|
||||
└──────────────────────────────────────────────────────────┘
|
||||
|
||||
XMPP message (XML):
|
||||
┌──────────────────────────────────────────────────────────┐
|
||||
│ <message to='bob@example.com' type='chat'> │
|
||||
│ <body>Hello everyone</body> │
|
||||
│ </message> │
|
||||
│ │
|
||||
│ ~120 bytes for 14 bytes of payload │
|
||||
│ XML parsing required (expensive). Verbose. │
|
||||
│ Schema via XSD exists but rarely enforced at runtime. │
|
||||
└──────────────────────────────────────────────────────────┘
|
||||
|
||||
Cap'n Proto (quicnprotochat):
|
||||
┌──────────────────────────────────────────────────────────┐
|
||||
│ [8-byte aligned struct with pointers] │
|
||||
│ │
|
||||
│ ~40 bytes for 14 bytes of payload │
|
||||
│ Zero-copy: wire bytes = memory layout. No parsing step. │
|
||||
│ Schema enforced at compile time via capnpc codegen. │
|
||||
│ Canonical form: deterministic bytes for signing. │
|
||||
│ Built-in async RPC (no separate HTTP/gRPC layer). │
|
||||
└──────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Security properties summary
|
||||
|
||||
The following diagram maps each protocol against the security properties it provides:
|
||||
|
||||
```
|
||||
FS PCS E2E E2E PQ Zero Server IETF
|
||||
(1:1) (grp) (1:1) (grp) ready trust excluded std
|
||||
│ │ │ │ │ │ │ │
|
||||
IRC+SSL · · · · · · · ·
|
||||
XMPP+TLS · · · · · · · ·
|
||||
XMPP+OMEMO ● · ● △ · ● · ·
|
||||
Telegram Cloud · · · · · · · ·
|
||||
Telegram Secret △ · ● · · ● · ·
|
||||
Signal ● · ● ● △ ● · ·
|
||||
quicnprotochat ● ● ● ● ● ● ● ●
|
||||
|
||||
Legend: ● = yes △ = partial · = no
|
||||
FS = forward secrecy PCS = post-compromise security
|
||||
E2E = end-to-end encryption PQ = post-quantum readiness
|
||||
Zero trust = server excluded from crypto
|
||||
Server excluded = server cannot read, modify, or forge messages
|
||||
IETF std = based on IETF-standardized protocol (RFC)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## The quicnprotochat advantage: a layered defense
|
||||
|
||||
Classical protocols rely on a **single layer** of security (transport TLS). quicnprotochat applies defense in depth with **three independent layers**, each of which must be broken separately:
|
||||
|
||||
```
|
||||
IRC+SSL security layers: quicnprotochat security layers:
|
||||
|
||||
┌─────────────────────────┐ ┌─────────────────────────────────┐
|
||||
│ TLS (transport) │ │ Layer 3: Hybrid KEM envelope │
|
||||
│ • server sees plain │ │ • X25519 + ML-KEM-768 │
|
||||
│ • single point of │ │ • post-quantum resistant │
|
||||
│ failure │ │ • both must be broken │
|
||||
└─────────────────────────┘ ├─────────────────────────────────┤
|
||||
│ Layer 2: MLS (RFC 9420) │
|
||||
│ • end-to-end group encryption │
|
||||
│ • forward secrecy per epoch │
|
||||
│ • post-compromise security │
|
||||
│ • ratchet tree (O(log n)) │
|
||||
├─────────────────────────────────┤
|
||||
│ Layer 1: QUIC + TLS 1.3 │
|
||||
│ • transport confidentiality │
|
||||
│ • 0-RTT resumption │
|
||||
│ • no head-of-line blocking │
|
||||
│ • multiplexed streams │
|
||||
└─────────────────────────────────┘
|
||||
|
||||
To read a message, attacker must break:
|
||||
IRC+SSL: TLS (1 layer)
|
||||
quicnprotochat: TLS + MLS + Hybrid KEM (3 layers)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## When would you still choose IRC?
|
||||
|
||||
Fairness demands acknowledging where classical protocols genuinely excel:
|
||||
|
||||
| Advantage | IRC | quicnprotochat |
|---|---|---|
| **Simplicity** | Telnet-compatible text protocol | Binary protocol requiring client implementation |
| **Maturity** | 35+ years of production use | Early-stage research project |
| **Federation** | Built-in multi-server mesh | Single server per deployment |
| **Client ecosystem** | Hundreds of clients on every platform | CLI only (currently) |
| **Low resource usage** | Runs on minimal hardware | Requires modern TLS/QUIC stack |
| **Public channels** | Designed for open, unencrypted discussion | Designed for private, encrypted communication |
| **Anonymity** | No identity required | Requires Ed25519 identity keypair |
||||
|
||||
IRC remains an excellent choice for **public, open discussion** where encryption is not needed and simplicity is valued. quicnprotochat is designed for a different threat model: private communication where **confidentiality, forward secrecy, and post-compromise security** are requirements, not luxuries.
|
||||
|
||||
---
|
||||
|
||||
## Migration path: what changes for users
|
||||
|
||||
For users and operators coming from classical chat systems, here is what changes practically:
|
||||
|
||||
| Concern | Classical (IRC/XMPP) | quicnprotochat |
|---|---|---|
| **Server setup** | Install IRCd, configure TLS cert | `cargo build && ./quicnprotochat-server` (auto-generates TLS cert) |
| **Client setup** | Install any IRC client | `./quicnprotochat-client register-user` (generates Ed25519 identity) |
| **Joining a group** | `/join #channel` | Receive MLS Welcome message from group creator |
| **Sending a message** | Type and press enter | Same -- client handles MLS encryption transparently |
| **Server admin sees messages** | Yes (always) | No (never -- server sees only ciphertext) |
| **Key management** | None (password only) | Automatic -- MLS handles key rotation, epoch advancement |
| **Device compromise recovery** | Change password | Any group member issues Update -- automatic PCS recovery |
| **Logging / compliance** | Server-side logging trivial | Requires client-side export (server has no plaintext) |
|
||||
|
||||
---
|
||||
|
||||
## Further reading
|
||||
|
||||
- [Why This Design, Not Signal/Matrix/...](why-not-signal.md) -- comparison with modern E2E-encrypted protocols
|
||||
- [Protocol Layers Overview](../protocol-layers/overview.md) -- detailed protocol stack documentation
|
||||
- [Threat Model](../cryptography/threat-model.md) -- what quicnprotochat does and does not protect against
|
||||
- [Post-Quantum Readiness](../cryptography/post-quantum-readiness.md) -- hybrid KEM design and rationale
|
||||
- [MLS (RFC 9420)](../protocol-layers/mls.md) -- deep dive into the group key agreement protocol
|
||||
- [Architecture Overview](../architecture/overview.md) -- system-level architecture
|
||||
@@ -64,6 +64,7 @@ For a deeper discussion of the cryptographic guarantees, threat model, and known
|
||||
|
||||
| Section | What you will find |
|
||||
|---|---|
|
||||
| **[Comparison with Classical Protocols](design-rationale/protocol-comparison.md)** | **Why quicnprotochat? IRC+SSL, XMPP, Telegram vs. our design** |
|
||||
| [Prerequisites](getting-started/prerequisites.md) | Toolchain and system dependencies |
|
||||
| [Building from Source](getting-started/building.md) | `cargo build`, Cap'n Proto codegen, troubleshooting |
|
||||
| [Running the Server](getting-started/running-the-server.md) | Server startup, configuration, TLS cert generation |
|
||||
@@ -74,7 +75,7 @@ For a deeper discussion of the cryptographic guarantees, threat model, and known
|
||||
| [Protocol Layers](protocol-layers/overview.md) | Deep dives into QUIC/TLS, Cap'n Proto, MLS, Hybrid KEM |
|
||||
| [Wire Format Reference](wire-format/overview.md) | Cap'n Proto schema documentation |
|
||||
| [Cryptography](cryptography/overview.md) | Identity keys, key lifecycle, forward secrecy, PCS, threat model |
|
||||
| [Design Rationale](design-rationale/overview.md) | ADRs and protocol design decisions |
|
||||
| [Roadmap](roadmap/milestones.md) | Milestone tracker and future research directions |
|
||||
|
||||
---
|
||||
|
||||
80
docs/src/roadmap/phase2-and-m4-m6.md
Normal file
80
docs/src/roadmap/phase2-and-m4-m6.md
Normal file
@@ -0,0 +1,80 @@
|
||||
# Phase 2 (Protocol Hardening) + M4–M6 Roadmap
|
||||
|
||||
This page tracks implementation of **Phase 2** (protocol hardening) from the
|
||||
[Production Readiness WBS](production-readiness.md), followed by **M4** (Group CLI),
|
||||
**M5** (Multi-party groups), and **M6** (Persistence).
|
||||
|
||||
---
|
||||
|
||||
## Legacy code removed
|
||||
|
||||
The following legacy behaviour has been removed; only current behaviour is supported:
|
||||
|
||||
- **Auth:** Server no longer accepts "any non-empty token" when no static token is configured. Either a static `auth_token` or a valid OPAQUE session token is required (auth version 1 only).
|
||||
- **Wire version:** Only wire version `1` is accepted on `enqueue`, `fetch`, `fetchWait`. Version `0` is rejected.
|
||||
- **Delivery storage:** Server only loads the channel-aware delivery map format (v2). Old v1 `deliveries.bin` files will not load; delete or migrate the file.
|
||||
- **Client:** Hybrid decryption is required for Welcome and application payloads. No fallback to plaintext MLS; missing or failed hybrid decrypt returns an error.
|
||||
|
||||
---
|
||||
|
||||
## Phase 2 — Protocols and Core Hardening
|
||||
|
||||
| Task | Status | Notes |
|------|--------|-------|
| **Ciphersuite allowlist** | **Done** | Server rejects KeyPackages whose ciphersuite is not `MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519`. See `quicnprotochat_core::validate_keypackage_ciphersuite` and `upload_key_package` (E021). |
| **ALPN enforcement** | **Done** | Server TLS config sets `alpn_protocols = [b"capnp"]`; handshake completes only if client offers `capnp`. |
| **Connection draining** | **Done** | On `Ctrl+C`, server calls `endpoint.close(0, b"server shutdown")` and exits the accept loop. |
| **Wire versioning** | **Done** | `enqueue`, `fetch`, `fetchWait` require `version == CURRENT_WIRE_VERSION` (1). Other RPCs use auth version. |
| **Downgrade guards** | **Deferred** | MLS epoch/ciphersuite consistency is enforced by openmls when processing commits. Explicit epoch-rollback checks can be added in M5. |
| **KeyPackage rotation** | **Doc** | Clients should upload a fresh KeyPackage before the 24h TTL. Helper or background task can be added in M4. |
|
||||
|
||||
---
|
||||
|
||||
## M4 — Group CLI Subcommands
|
||||
|
||||
**Goal:** Persistent, composable CLI for group operations (replace monolithic `demo-group`).
|
||||
|
||||
| Deliverable | Status |
|
||||
|-------------|--------|
|
||||
| `create-group` | Planned |
|
||||
| `invite <identity>` | Planned |
|
||||
| `join` | Planned |
|
||||
| `send <message>` | Planned |
|
||||
| `recv` | Planned |
|
||||
| Keep `demo-group` | Existing |
|
||||
|
||||
See [Milestones](milestones.md#m4--group-cli-subcommands-next).
|
||||
|
||||
---
|
||||
|
||||
## M5 — Multi-party Groups
|
||||
|
||||
**Goal:** N > 2 members, commit fan-out, proposal handling.
|
||||
|
||||
| Deliverable | Status |
|
||||
|-------------|--------|
|
||||
| Commit fan-out via DS | Planned |
|
||||
| Proposal handling (Add, Remove, Update) | Planned |
|
||||
| Epoch sync across N members | Planned |
|
||||
| Benchmarks | Planned |
|
||||
|
||||
---
|
||||
|
||||
## M6 — Persistence
|
||||
|
||||
**Goal:** Server survives restart; client state persists across sessions.
|
||||
|
||||
| Deliverable | Status |
|
||||
|-------------|--------|
|
||||
| SQLite/SQLCipher (AS + DS) | Partial (SqlStore exists) |
|
||||
| `migrations/` | Planned |
|
||||
| Client reconnect + session resume | Planned |
|
||||
| Docker + healthcheck | Partial (Dockerfile exists) |
|
||||
|
||||
---
|
||||
|
||||
## Cross-references
|
||||
|
||||
- [Production Readiness WBS](production-readiness.md) — Phase 2 definition
|
||||
- [Milestones](milestones.md) — M4, M5, M6 details
|
||||
- [Auth, Devices, and Tokens](authz-plan.md) — Phase 3
|
||||
@@ -21,8 +21,7 @@ interface NodeService {
|
||||
# Upload a single-use KeyPackage for later retrieval by peers.
|
||||
# identityKey : Ed25519 public key bytes (32 bytes)
|
||||
# package : TLS-encoded openmls KeyPackage
|
||||
  # auth         : Auth context (version=1, non-empty accessToken required).
|
||||
uploadKeyPackage @0 (identityKey :Data, package :Data, auth :Auth)
|
||||
-> (fingerprint :Data);
|
||||
|
||||
@@ -33,7 +32,7 @@ interface NodeService {
|
||||
# Enqueue an opaque payload for delivery to a recipient.
|
||||
# channelId : Optional channel identifier (empty for legacy). A 16-byte UUID
|
||||
# is recommended for 1:1 channels.
|
||||
  # version      : Schema/wire version. Must be 1.
|
||||
enqueue @2 (recipientKey :Data, payload :Data, channelId :Data,
|
||||
version :UInt16, auth :Auth) -> ();
|
||||
|
||||
@@ -57,7 +56,7 @@ interface NodeService {
|
||||
}
|
||||
|
||||
struct Auth {
|
||||
  version @0 :UInt16;     # 1 = token-based auth (required)
|
||||
accessToken @1 :Data; # opaque bearer token issued at login
|
||||
deviceId @2 :Data; # optional UUID bytes for auditing/rate limiting
|
||||
}
|
||||
@@ -108,7 +107,7 @@ Enqueues an opaque payload for delivery. Identical semantics to the standalone [
|
||||
| `recipientKey` | `Data` | 32 bytes | Recipient's raw Ed25519 public key |
|
||||
| `payload` | `Data` | Variable | Opaque byte string (typically MLS ciphertext) |
|
||||
| `channelId` | `Data` | 0 or 16 bytes | Channel identifier (empty for legacy, UUID recommended) |
|
||||
| `version` | `UInt16` | 2 bytes | Wire version: `1` = current (required) |
|
||||
| `auth` | `Auth` | Struct | Authentication context |
|
||||
|
||||
#### `fetch @3`
|
||||
@@ -204,18 +203,9 @@ The `Auth` struct is attached to every mutating or per-user method call. It prov
|
||||
|
||||
| Version | Behavior |
|
||||
|---|---|
|
||||
| `1` | **Token-based authentication (required).** The server validates `accessToken` (static token or OPAQUE session) and rejects requests with missing or invalid tokens. `deviceId` is used for audit logging. |
|
||||
|
||||
### Backward compatibility
|
||||
|
||||
Auth version `0` is no longer supported; clients must send `version=1` and a valid `accessToken`. The `version` field originally enabled a gradual migration -- legacy clients sent `version=0`, new clients sent `version=1`, and the server inspected the field to choose a validation path. That migration is now complete: the server rejects `version=0` requests, and no breaking schema change was needed when authentication became mandatory.
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -20,9 +20,9 @@ interface DeliveryService {
|
||||
# recipientKey : Ed25519 public key of the intended recipient (exactly 32 bytes).
|
||||
# payload : Opaque byte string — a TLS-encoded MlsMessageOut blob or any
|
||||
# other framed data the application layer wants to deliver.
|
||||
  # channelId    : Optional channel identifier (empty for default). A 16-byte UUID
|
||||
# is recommended for 1:1 channels.
|
||||
  # version      : Schema/wire version. Must be 1.
|
||||
#
|
||||
# The payload is appended to the recipient's FIFO queue. Returns immediately;
|
||||
# the recipient retrieves it via `fetch`.
|
||||
@@ -31,8 +31,8 @@ interface DeliveryService {
|
||||
# Fetch and atomically drain all queued payloads for a given recipient.
|
||||
#
|
||||
# recipientKey : Ed25519 public key of the caller (exactly 32 bytes).
|
||||
  # channelId    : Optional channel identifier (empty for default).
  # version      : Schema/wire version. Must be 1.
|
||||
#
|
||||
# Returns the complete queue in FIFO order and clears it. Returns an empty
|
||||
# list if there are no pending messages.
|
||||
|
||||
@@ -9,8 +9,7 @@ interface NodeService {
|
||||
# Upload a single-use KeyPackage for later retrieval by peers.
|
||||
# identityKey : Ed25519 public key bytes (32 bytes)
|
||||
# package : TLS-encoded openmls KeyPackage
|
||||
  # auth         : Auth context (version=1, non-empty accessToken required).
|
||||
uploadKeyPackage @0 (identityKey :Data, package :Data, auth :Auth) -> (fingerprint :Data);
|
||||
|
||||
# Fetch and atomically remove one KeyPackage for a given identity key.
|
||||
@@ -18,9 +17,9 @@ interface NodeService {
|
||||
fetchKeyPackage @1 (identityKey :Data, auth :Auth) -> (package :Data);
|
||||
|
||||
# Enqueue an opaque payload for delivery to a recipient.
|
||||
  # channelId    : Optional channel identifier (empty for default). A 16-byte UUID
  #                is recommended for 1:1 channels.
  # version      : Schema/wire version. Must be 1.
|
||||
enqueue @2 (recipientKey :Data, payload :Data, channelId :Data, version :UInt16, auth :Auth) -> ();
|
||||
|
||||
# Fetch and drain all queued payloads for the recipient.
|
||||
@@ -67,7 +66,7 @@ interface NodeService {
|
||||
}
|
||||
|
||||
struct Auth {
|
||||
  version @0 :UInt16;     # 1 = token-based auth (required)
|
||||
accessToken @1 :Data; # opaque bearer token issued at login
|
||||
deviceId @2 :Data; # optional UUID bytes for auditing/rate limiting
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user