fix: address 16 architecture design flaws across all crates

Phase 1 — Foundation:
- Constant-time token comparison via subtle::ConstantTimeEq (Fix 11)
- Structured error codes E001–E020 in new error_codes.rs (Fix 15)
- Remove dead envelope.capnp code and related types (Fix 16)

Phase 2 — Auth Hardening:
- Registration collision check via has_user_record() (Fix 5)
- Auth required on uploadHybridKey/fetchHybridKey RPCs (Fix 1)
- Identity-token binding at registration and login (Fix 2)
- Session token expiry with 24h TTL and background reaper (Fix 3)
- Bounded pending logins with 5-minute timeout (Fix 4)

Phase 3 — Resource Limits:
- Rate limiting: 100 enqueues/60s per token (Fix 6)
- Queue depth cap at 1000 + 7-day message TTL/GC (Fix 7)
- Partial queue drain via limit param on fetch/fetchWait (Fix 8)

Phase 4 — Crypto Fixes:
- OPAQUE KSF switched from Identity to Argon2id (Fix 10)
- Random AEAD nonce in hybrid KEM instead of HKDF-derived (Fix 12)
- Zeroize secret fields in HybridKeypairBytes (Fix 13)
- Encrypted client state files via QPCE format (Fix 9)

Phase 5 — Protocol:
- Commit fan-out to all existing members on invite (Fix 14)
- Add member_identities() to GroupMember

Breaking change: existing OPAQUE registrations are invalidated by the switch to the Argon2id KSF; affected users must re-register.
Schema changes: added an auth field to the hybrid key RPCs, an identityKey field to the
OPAQUE finish RPCs, and a limit parameter to fetch/fetchWait.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-22 10:51:09 +01:00
parent 8d5c1b3b9b
commit 0bdc222724
19 changed files with 4516 additions and 495 deletions

2970
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -5,6 +5,7 @@ members = [
"crates/quicnprotochat-proto",
"crates/quicnprotochat-server",
"crates/quicnprotochat-client",
"crates/quicnprotochat-p2p",
]
# Shared dependency versions — bump here to affect the whole workspace.
@@ -25,8 +26,10 @@ ed25519-dalek = { version = "2", features = ["rand_core"] }
sha2 = { version = "0.10" }
hkdf = { version = "0.12" }
chacha20poly1305 = { version = "0.10" }
opaque-ke = { version = "4", features = ["ristretto255"] }
zeroize = { version = "1", features = ["derive"] }
opaque-ke = { version = "4", features = ["ristretto255", "argon2"] }
zeroize = { version = "1", features = ["derive", "serde"] }
subtle = { version = "2" }
argon2 = { version = "0.5" }
rand = { version = "0.8" }
serde = { version = "1", features = ["derive"] }
serde_json = { version = "1" }
@@ -42,7 +45,7 @@ tokio-util = { version = "0.7", features = ["codec", "compat"] }
futures = { version = "0.3" }
quinn = { version = "0.11" }
quinn-proto = { version = "0.11" }
rustls = { version = "0.23", default-features = false, features = ["std"] }
rustls = { version = "0.23", default-features = false, features = ["std", "ring"] }
rcgen = { version = "0.13" }
# ── Database ─────────────────────────────────────────────────────────────

View File

@@ -36,6 +36,8 @@ thiserror = { workspace = true }
# Crypto — for fingerprint verification in fetch-key subcommand
sha2 = { workspace = true }
argon2 = { workspace = true }
chacha20poly1305 = { workspace = true }
quinn = { workspace = true }
quinn-proto = { workspace = true }
rustls = { workspace = true }

View File

@@ -4,7 +4,13 @@ use std::path::{Path, PathBuf};
use std::sync::{Arc, OnceLock};
use anyhow::Context;
use argon2::Argon2;
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
use chacha20poly1305::{
aead::{Aead, KeyInit},
ChaCha20Poly1305, Key, Nonce,
};
use rand::RngCore;
use serde::{Deserialize, Serialize};
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
@@ -13,12 +19,21 @@ use quinn_proto::crypto::rustls::QuicClientConfig;
use rustls::pki_types::CertificateDer;
use rustls::{ClientConfig as RustlsClientConfig, RootCertStore};
use opaque_ke::{
ClientLogin, ClientLoginFinishParameters, ClientRegistration,
ClientRegistrationFinishParameters, CredentialResponse, RegistrationResponse,
};
use quicnprotochat_core::{
generate_key_package, hybrid_decrypt, hybrid_encrypt, DiskKeyStore, GroupMember,
HybridKeypair, HybridKeypairBytes, HybridPublicKey, IdentityKeypair,
generate_key_package, hybrid_decrypt, hybrid_encrypt, opaque_auth::OpaqueSuite, DiskKeyStore,
GroupMember, HybridKeypair, HybridKeypairBytes, HybridPublicKey, IdentityKeypair,
};
use quicnprotochat_proto::node_capnp::{auth, node_service};
/// Magic bytes for encrypted client state files.
const STATE_MAGIC: &[u8; 4] = b"QPCE";
const STATE_SALT_LEN: usize = 16;
const STATE_NONCE_LEN: usize = 12;
// Global auth context initialized once per process.
static AUTH_CONTEXT: OnceLock<ClientAuth> = OnceLock::new();
@@ -49,7 +64,7 @@ pub fn init_auth(ctx: ClientAuth) {
let _ = AUTH_CONTEXT.set(ctx);
}
// ── Subcommand implementations ───────────────────────────────────────────────
// -- Subcommand implementations -----------------------------------------------
/// Connect to `server`, call health, and print RTT over QUIC/TLS.
pub async fn cmd_ping(server: &str, ca_cert: &Path, server_name: &str) -> anyhow::Result<()> {
@@ -72,6 +87,161 @@ pub async fn cmd_ping(server: &str, ca_cert: &Path, server_name: &str) -> anyhow
Ok(())
}
/// Register a new user account via the OPAQUE protocol.
///
/// The server never sees the password in plaintext.
pub async fn cmd_register_user(
    server: &str,
    ca_cert: &Path,
    server_name: &str,
    username: &str,
    password: &str,
) -> anyhow::Result<()> {
    let mut rng = rand::rngs::OsRng;
    let node_client = connect_node(server, ca_cert, server_name).await?;

    // Step 1: blind the password locally and send the registration request.
    let started = ClientRegistration::<OpaqueSuite>::start(&mut rng, password.as_bytes())
        .map_err(|e| anyhow::anyhow!("OPAQUE register start: {e}"))?;
    let mut start_req = node_client.opaque_register_start_request();
    {
        let mut params = start_req.get();
        params.set_username(username);
        params.set_request(&started.message.serialize());
    }
    let start_resp = start_req
        .send()
        .promise
        .await
        .context("opaque_register_start RPC failed")?;
    let response_bytes = start_resp
        .get()
        .context("register_start: bad response")?
        .get_response()
        .context("register_start: missing response")?
        .to_vec();
    let server_response = RegistrationResponse::<OpaqueSuite>::deserialize(&response_bytes)
        .map_err(|e| anyhow::anyhow!("invalid registration response: {e}"))?;

    // Step 2: finish the registration envelope locally and upload it.
    let finished = started
        .state
        .finish(
            &mut rng,
            password.as_bytes(),
            server_response,
            ClientRegistrationFinishParameters::<OpaqueSuite>::default(),
        )
        .map_err(|e| anyhow::anyhow!("OPAQUE register finish: {e}"))?;
    let mut finish_req = node_client.opaque_register_finish_request();
    {
        let mut params = finish_req.get();
        params.set_username(username);
        params.set_upload(&finished.message.serialize());
        // Identity-token binding: pass empty bytes (no state file available).
        params.set_identity_key(&[]);
    }
    let finish_resp = finish_req
        .send()
        .promise
        .await
        .context("opaque_register_finish RPC failed")?;
    let accepted = finish_resp
        .get()
        .context("register_finish: bad response")?
        .get_success();
    anyhow::ensure!(accepted, "server rejected registration");

    println!("user '{username}' registered successfully (OPAQUE)");
    Ok(())
}
/// Log in via the OPAQUE protocol and receive a session token.
///
/// Returns the session token as a hex string. Use it as `--access-token` for
/// subsequent commands.
pub async fn cmd_login(
    server: &str,
    ca_cert: &Path,
    server_name: &str,
    username: &str,
    password: &str,
) -> anyhow::Result<()> {
    let mut rng = rand::rngs::OsRng;
    let node_client = connect_node(server, ca_cert, server_name).await?;

    // Step 1: send the blinded credential request to the server.
    let started = ClientLogin::<OpaqueSuite>::start(&mut rng, password.as_bytes())
        .map_err(|e| anyhow::anyhow!("OPAQUE login start: {e}"))?;
    let mut start_req = node_client.opaque_login_start_request();
    {
        let mut params = start_req.get();
        params.set_username(username);
        params.set_request(&started.message.serialize());
    }
    let start_resp = start_req
        .send()
        .promise
        .await
        .context("opaque_login_start RPC failed")?;
    let response_bytes = start_resp
        .get()
        .context("login_start: bad response")?
        .get_response()
        .context("login_start: missing response")?
        .to_vec();
    let credential_response = CredentialResponse::<OpaqueSuite>::deserialize(&response_bytes)
        .map_err(|e| anyhow::anyhow!("invalid credential response: {e}"))?;

    // Step 2: finish the AKE locally; a wrong password surfaces here.
    let finished = started
        .state
        .finish(
            &mut rng,
            password.as_bytes(),
            credential_response,
            ClientLoginFinishParameters::<OpaqueSuite>::default(),
        )
        .map_err(|e| anyhow::anyhow!("OPAQUE login finish (bad password?): {e}"))?;
    let mut finish_req = node_client.opaque_login_finish_request();
    {
        let mut params = finish_req.get();
        params.set_username(username);
        params.set_finalization(&finished.message.serialize());
        // Identity-token binding: pass empty bytes (no state file available).
        params.set_identity_key(&[]);
    }
    let finish_resp = finish_req
        .send()
        .promise
        .await
        .context("opaque_login_finish RPC failed")?;
    let session_token = finish_resp
        .get()
        .context("login_finish: bad response")?
        .get_session_token()
        .context("login_finish: missing session_token")?
        .to_vec();
    anyhow::ensure!(!session_token.is_empty(), "server returned empty session token");

    println!("login successful for '{username}'");
    println!("session_token: {}", hex::encode(&session_token));
    println!("(use as --access-token for subsequent commands)");
    Ok(())
}
/// Generate a KeyPackage for a fresh identity and upload it to the AS.
///
/// Must run on a `LocalSet` because capnp-rpc is `!Send`.
@@ -128,8 +298,9 @@ pub async fn cmd_register_state(
server: &str,
ca_cert: &Path,
server_name: &str,
password: Option<&str>,
) -> anyhow::Result<()> {
let state = load_or_init_state(state_path)?;
let state = load_or_init_state(state_path, password)?;
let (mut member, hybrid_kp) = state.into_parts(state_path)?;
let tls_bytes = member
@@ -181,7 +352,7 @@ pub async fn cmd_register_state(
println!("fingerprint : {}", hex::encode(&fingerprint));
println!("KeyPackage uploaded successfully.");
save_state(state_path, &member, hybrid_kp.as_ref())?;
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
Ok(())
}
@@ -241,7 +412,7 @@ pub async fn cmd_fetch_key(
Ok(())
}
/// Run a complete AliceBob MLS round-trip using the unified server endpoint.
/// Run a complete Alice/Bob MLS round-trip using the unified server endpoint.
///
/// All payloads are wrapped in post-quantum hybrid envelopes (X25519 + ML-KEM-768).
pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) -> anyhow::Result<()> {
@@ -321,12 +492,12 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
bob.join_group(&welcome_bytes)
.context("Bob join_group failed")?;
// Alice Bob (hybrid-wrapped)
// Alice -> Bob (hybrid-wrapped)
let ct_ab = alice
.send_message(b"hello bob")
.context("Alice send_message failed")?;
let wrapped_ab =
hybrid_encrypt(&bob_hybrid_pk, &ct_ab).context("hybrid encrypt AliceBob")?;
hybrid_encrypt(&bob_hybrid_pk, &ct_ab).context("hybrid encrypt Alice->Bob")?;
enqueue(&alice_ds, &bob_id.public_key_bytes(), &wrapped_ab).await?;
let bob_msgs = fetch_all(&bob_ds, &bob_id.public_key_bytes()).await?;
@@ -338,11 +509,11 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
.receive_message(&inner_ab)?
.context("Bob expected application message from Alice")?;
println!(
"Alice Bob plaintext: {}",
"Alice -> Bob plaintext: {}",
String::from_utf8_lossy(&ab_plaintext)
);
// Bob Alice (hybrid-wrapped)
// Bob -> Alice (hybrid-wrapped)
let alice_hybrid_pk = fetch_hybrid_key(&bob_node, &alice_id.public_key_bytes())
.await?
.context("Alice hybrid key not found")?;
@@ -350,7 +521,7 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
.send_message(b"hello alice")
.context("Bob send_message failed")?;
let wrapped_ba =
hybrid_encrypt(&alice_hybrid_pk, &ct_ba).context("hybrid encrypt BobAlice")?;
hybrid_encrypt(&alice_hybrid_pk, &ct_ba).context("hybrid encrypt Bob->Alice")?;
enqueue(&bob_ds, &alice_id.public_key_bytes(), &wrapped_ba).await?;
let alice_msgs = fetch_all(&alice_ds, &alice_id.public_key_bytes()).await?;
@@ -363,7 +534,7 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
.receive_message(&inner_ba)?
.context("Alice expected application message from Bob")?;
println!(
"Bob Alice plaintext: {}",
"Bob -> Alice plaintext: {}",
String::from_utf8_lossy(&ba_plaintext)
);
@@ -377,8 +548,9 @@ pub async fn cmd_create_group(
state_path: &Path,
_server: &str,
group_id: &str,
password: Option<&str>,
) -> anyhow::Result<()> {
let state = load_or_init_state(state_path)?;
let state = load_or_init_state(state_path, password)?;
let (mut member, hybrid_kp) = state.into_parts(state_path)?;
anyhow::ensure!(
@@ -390,7 +562,7 @@ pub async fn cmd_create_group(
.create_group(group_id.as_bytes())
.context("create_group failed")?;
save_state(state_path, &member, hybrid_kp.as_ref())?;
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
println!("group created: {group_id}");
Ok(())
}
@@ -405,8 +577,9 @@ pub async fn cmd_invite(
ca_cert: &Path,
server_name: &str,
peer_key_hex: &str,
password: Option<&str>,
) -> anyhow::Result<()> {
let state = load_existing_state(state_path)?;
let state = load_existing_state(state_path, password)?;
let (mut member, hybrid_kp) = state.into_parts(state_path)?;
let peer_key = decode_identity_key(peer_key_hex)?;
@@ -421,7 +594,30 @@ pub async fn cmd_invite(
.group_ref()
.context("no active group; run create-group first")?;
let (_, welcome) = member.add_member(&peer_kp).context("add_member failed")?;
// Collect existing member identity keys *before* adding the new member,
// so we know who to fan-out the commit to.
let existing_members: Vec<Vec<u8>> = member
.member_identities()
.into_iter()
.filter(|k| k.as_slice() != member.identity().public_key_bytes())
.collect();
let (commit, welcome) = member.add_member(&peer_kp).context("add_member failed")?;
// Fan out the Commit to all existing members (excluding self and the
// new joiner who receives the Welcome instead). Fix 14.
for mk in &existing_members {
if mk.as_slice() == peer_key.as_slice() {
continue;
}
let peer_hpk = fetch_hybrid_key(&node_client, mk).await?;
let commit_payload = if let Some(ref pk) = peer_hpk {
hybrid_encrypt(pk, &commit).context("hybrid encrypt commit")?
} else {
commit.clone()
};
enqueue(&node_client, mk, &commit_payload).await?;
}
// Wrap welcome in hybrid envelope if peer has a hybrid public key.
let peer_hybrid_pk = fetch_hybrid_key(&node_client, &peer_key).await?;
@@ -433,10 +629,11 @@ pub async fn cmd_invite(
enqueue(&node_client, &peer_key, &payload).await?;
save_state(state_path, &member, hybrid_kp.as_ref())?;
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
println!(
"invited peer (welcome queued{})",
if peer_hybrid_pk.is_some() { ", hybrid-encrypted" } else { "" }
"invited peer (welcome queued{}, commit sent to {} existing member(s))",
if peer_hybrid_pk.is_some() { ", hybrid-encrypted" } else { "" },
existing_members.len(),
);
Ok(())
}
@@ -449,8 +646,9 @@ pub async fn cmd_join(
server: &str,
ca_cert: &Path,
server_name: &str,
password: Option<&str>,
) -> anyhow::Result<()> {
let state = load_existing_state(state_path)?;
let state = load_existing_state(state_path, password)?;
let (mut member, hybrid_kp) = state.into_parts(state_path)?;
anyhow::ensure!(
@@ -472,7 +670,7 @@ pub async fn cmd_join(
.join_group(&welcome_bytes)
.context("join_group failed")?;
save_state(state_path, &member, hybrid_kp.as_ref())?;
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
println!("joined group successfully");
Ok(())
}
@@ -488,8 +686,9 @@ pub async fn cmd_send(
server_name: &str,
peer_key_hex: &str,
msg: &str,
password: Option<&str>,
) -> anyhow::Result<()> {
let state = load_existing_state(state_path)?;
let state = load_existing_state(state_path, password)?;
let (mut member, hybrid_kp) = state.into_parts(state_path)?;
let peer_key = decode_identity_key(peer_key_hex)?;
@@ -509,7 +708,7 @@ pub async fn cmd_send(
enqueue(&node_client, &peer_key, &payload).await?;
save_state(state_path, &member, hybrid_kp.as_ref())?;
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
println!(
"message sent{}",
if peer_hybrid_pk.is_some() { " (hybrid-encrypted)" } else { "" }
@@ -527,8 +726,9 @@ pub async fn cmd_recv(
server_name: &str,
wait_ms: u64,
stream: bool,
password: Option<&str>,
) -> anyhow::Result<()> {
let state = load_existing_state(state_path)?;
let state = load_existing_state(state_path, password)?;
let (mut member, hybrid_kp) = state.into_parts(state_path)?;
let client = connect_node(server, ca_cert, server_name).await?;
@@ -555,7 +755,7 @@ pub async fn cmd_recv(
}
}
save_state(state_path, &member, hybrid_kp.as_ref())?;
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
if !stream {
return Ok(());
@@ -563,7 +763,7 @@ pub async fn cmd_recv(
}
}
// ── Shared helpers ───────────────────────────────────────────────────────────
// -- Shared helpers -----------------------------------------------------------
/// Establish a QUIC/TLS connection and return a `NodeService` client.
///
@@ -583,9 +783,10 @@ pub async fn connect_node(
.add(CertificateDer::from(cert_bytes))
.context("add root cert")?;
let tls = RustlsClientConfig::builder()
let mut tls = RustlsClientConfig::builder()
.with_root_certificates(roots)
.with_no_client_auth();
tls.alpn_protocols = vec![b"capnp".to_vec()];
let crypto = QuicClientConfig::try_from(tls)
.map_err(|e| anyhow::anyhow!("invalid client TLS config: {e}"))?;
@@ -709,6 +910,7 @@ pub async fn fetch_all(
p.set_recipient_key(recipient_key);
p.set_channel_id(&[]);
p.set_version(1);
p.set_limit(0); // fetch all (backward compat)
let mut auth = p.reborrow().init_auth();
set_auth(&mut auth);
}
@@ -742,6 +944,7 @@ pub async fn fetch_wait(
p.set_timeout_ms(timeout_ms);
p.set_channel_id(&[]);
p.set_version(1);
p.set_limit(0); // fetch all (backward compat)
let mut auth = p.reborrow().init_auth();
set_auth(&mut auth);
}
@@ -777,6 +980,8 @@ pub async fn upload_hybrid_key(
let mut p = req.get();
p.set_identity_key(identity_key);
p.set_hybrid_public_key(&hybrid_pk.to_bytes());
let mut auth = p.reborrow().init_auth();
set_auth(&mut auth);
}
req.send()
.promise
@@ -793,7 +998,12 @@ pub async fn fetch_hybrid_key(
identity_key: &[u8],
) -> anyhow::Result<Option<HybridPublicKey>> {
let mut req = client.fetch_hybrid_key_request();
req.get().set_identity_key(identity_key);
{
let mut p = req.get();
p.set_identity_key(identity_key);
let mut auth = p.reborrow().init_auth();
set_auth(&mut auth);
}
let resp = req
.send()
@@ -848,6 +1058,9 @@ struct StoredState {
/// Post-quantum hybrid keypair (X25519 + ML-KEM-768). `None` for legacy state files.
#[serde(default)]
hybrid_key: Option<HybridKeypairBytes>,
/// Cached member public keys for group participants (Fix 14 prep).
#[serde(default)]
member_keys: Vec<Vec<u8>>,
}
impl StoredState {
@@ -881,17 +1094,82 @@ impl StoredState {
identity_seed: member.identity_seed(),
group,
hybrid_key: hybrid_kp.map(|kp| kp.to_bytes()),
member_keys: Vec::new(),
})
}
}
fn load_or_init_state(path: &Path) -> anyhow::Result<StoredState> {
// -- Encrypted state file helpers ---------------------------------------------
/// Derive a 32-byte key from a password and salt using Argon2id.
fn derive_state_key(password: &str, salt: &[u8]) -> anyhow::Result<[u8; 32]> {
    let mut derived = [0u8; 32];
    let kdf = Argon2::default();
    kdf.hash_password_into(password.as_bytes(), salt, &mut derived)
        .map_err(|e| anyhow::anyhow!("argon2 key derivation failed: {e}"))?;
    Ok(derived)
}
/// Encrypt `plaintext` with the QPCE format: magic(4) | salt(16) | nonce(12) | ciphertext.
fn encrypt_state(password: &str, plaintext: &[u8]) -> anyhow::Result<Vec<u8>> {
    // Fresh random salt and nonce for every write; both live in the header
    // so `decrypt_state` can recover them.
    let mut salt = [0u8; STATE_SALT_LEN];
    let mut nonce_bytes = [0u8; STATE_NONCE_LEN];
    rand::rngs::OsRng.fill_bytes(&mut salt);
    rand::rngs::OsRng.fill_bytes(&mut nonce_bytes);

    let key = derive_state_key(password, &salt)?;
    let ciphertext = ChaCha20Poly1305::new(Key::from_slice(&key))
        .encrypt(Nonce::from_slice(&nonce_bytes), plaintext)
        .map_err(|e| anyhow::anyhow!("state encryption failed: {e}"))?;

    // Assemble header followed by the AEAD ciphertext.
    let mut out = Vec::with_capacity(4 + STATE_SALT_LEN + STATE_NONCE_LEN + ciphertext.len());
    out.extend_from_slice(STATE_MAGIC);
    out.extend_from_slice(&salt);
    out.extend_from_slice(&nonce_bytes);
    out.extend_from_slice(&ciphertext);
    Ok(out)
}
/// Decrypt a QPCE-formatted state file: magic(4) | salt(16) | nonce(12) | ciphertext.
///
/// Defensively re-checks the magic prefix even though callers are expected to
/// have verified it via `is_encrypted_state`, so a stray call on a plaintext
/// file fails with a clear error instead of an AEAD failure.
fn decrypt_state(password: &str, data: &[u8]) -> anyhow::Result<Vec<u8>> {
    let header_len = 4 + STATE_SALT_LEN + STATE_NONCE_LEN;
    anyhow::ensure!(
        data.len() >= 4 && &data[..4] == STATE_MAGIC,
        "not a QPCE-encrypted state file (bad magic)"
    );
    anyhow::ensure!(
        data.len() > header_len,
        "encrypted state file too short ({} bytes)",
        data.len()
    );
    // Header layout must mirror `encrypt_state`.
    let salt = &data[4..4 + STATE_SALT_LEN];
    let nonce_bytes = &data[4 + STATE_SALT_LEN..header_len];
    let ciphertext = &data[header_len..];
    let key = derive_state_key(password, salt)?;
    let cipher = ChaCha20Poly1305::new(Key::from_slice(&key));
    let nonce = Nonce::from_slice(nonce_bytes);
    // AEAD failure is deliberately opaque: a wrong password and a corrupted
    // file are indistinguishable, so surface one generic error for both.
    let plaintext = cipher
        .decrypt(nonce, ciphertext)
        .map_err(|_| anyhow::anyhow!("state decryption failed (wrong password?)"))?;
    Ok(plaintext)
}
/// Returns true if raw bytes begin with the QPCE magic header.
fn is_encrypted_state(bytes: &[u8]) -> bool {
    // `starts_with` already returns false for slices shorter than the magic.
    bytes.starts_with(STATE_MAGIC)
}
fn load_or_init_state(path: &Path, password: Option<&str>) -> anyhow::Result<StoredState> {
if path.exists() {
let mut state = load_existing_state(path)?;
let mut state = load_existing_state(path, password)?;
// Upgrade legacy state files: generate hybrid keypair if missing.
if state.hybrid_key.is_none() {
state.hybrid_key = Some(HybridKeypair::generate().to_bytes());
write_state(path, &state)?;
write_state(path, &state, password)?;
}
return Ok(state);
}
@@ -901,29 +1179,46 @@ fn load_or_init_state(path: &Path) -> anyhow::Result<StoredState> {
let key_store = DiskKeyStore::persistent(keystore_path(path))?;
let member = GroupMember::new_with_state(Arc::new(identity), key_store, None);
let state = StoredState::from_parts(&member, Some(&hybrid_kp))?;
write_state(path, &state)?;
write_state(path, &state, password)?;
Ok(state)
}
fn load_existing_state(path: &Path) -> anyhow::Result<StoredState> {
fn load_existing_state(path: &Path, password: Option<&str>) -> anyhow::Result<StoredState> {
let bytes = std::fs::read(path).with_context(|| format!("read state file {path:?}"))?;
bincode::deserialize(&bytes).context("decode state")
if is_encrypted_state(&bytes) {
let pw = password.context(
"state file is encrypted (QPCE); a password is required to decrypt it",
)?;
let plaintext = decrypt_state(pw, &bytes)?;
bincode::deserialize(&plaintext).context("decode encrypted state")
} else {
bincode::deserialize(&bytes).context("decode state")
}
}
fn save_state(
path: &Path,
member: &GroupMember,
hybrid_kp: Option<&HybridKeypair>,
password: Option<&str>,
) -> anyhow::Result<()> {
let state = StoredState::from_parts(member, hybrid_kp)?;
write_state(path, &state)
write_state(path, &state, password)
}
fn write_state(path: &Path, state: &StoredState) -> anyhow::Result<()> {
fn write_state(path: &Path, state: &StoredState, password: Option<&str>) -> anyhow::Result<()> {
if let Some(parent) = path.parent() {
std::fs::create_dir_all(parent).with_context(|| format!("create dir {parent:?}"))?;
}
let bytes = bincode::serialize(state).context("encode state")?;
let plaintext = bincode::serialize(state).context("encode state")?;
let bytes = if let Some(pw) = password {
encrypt_state(pw, &plaintext)?
} else {
plaintext
};
std::fs::write(path, bytes).with_context(|| format!("write state {path:?}"))?;
Ok(())
}
@@ -950,7 +1245,7 @@ fn current_timestamp_ms() -> u64 {
.as_millis() as u64
}
// ── Hex encoding helper ─────────────────────────────────────────────────────
// -- Hex encoding helper ------------------------------------------------------
//
// We use a tiny inline module rather than adding `hex` as a dependency.

View File

@@ -5,8 +5,9 @@ use std::path::PathBuf;
use clap::{Parser, Subcommand};
use quicnprotochat_client::{
cmd_create_group, cmd_demo_group, cmd_fetch_key, cmd_invite, cmd_join, cmd_ping, cmd_recv,
cmd_register, cmd_register_state, cmd_send, ClientAuth, init_auth,
cmd_create_group, cmd_demo_group, cmd_fetch_key, cmd_invite, cmd_join, cmd_login, cmd_ping,
cmd_recv, cmd_register, cmd_register_state, cmd_register_user, cmd_send, ClientAuth,
init_auth,
};
// ── CLI ───────────────────────────────────────────────────────────────────────
@@ -32,20 +33,48 @@ struct Args {
)]
server_name: String,
/// Bearer token for authenticated requests (version 1, required).
#[arg(long, global = true, env = "QUICNPROTOCHAT_ACCESS_TOKEN", required = true)]
/// Bearer token or OPAQUE session token for authenticated requests.
/// Not required for register-user and login commands.
#[arg(long, global = true, env = "QUICNPROTOCHAT_ACCESS_TOKEN", default_value = "")]
access_token: String,
/// Optional device identifier (UUID bytes encoded as hex or raw string).
#[arg(long, global = true, env = "QUICNPROTOCHAT_DEVICE_ID")]
device_id: Option<String>,
/// Password to encrypt/decrypt client state files (QPCE format).
/// If set, state files are encrypted at rest with Argon2id + ChaCha20Poly1305.
#[arg(long, global = true, env = "QUICNPROTOCHAT_STATE_PASSWORD")]
state_password: Option<String>,
#[command(subcommand)]
command: Command,
}
#[derive(Debug, Subcommand)]
enum Command {
/// Register a new user via OPAQUE (password never leaves the client).
RegisterUser {
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
server: String,
/// Username for the new account.
#[arg(long)]
username: String,
/// Password (will be used in OPAQUE PAKE; server never sees it).
#[arg(long)]
password: String,
},
/// Log in via OPAQUE and receive a session token.
Login {
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
server: String,
#[arg(long)]
username: String,
#[arg(long)]
password: String,
},
/// Send a Ping to the server and print the round-trip time.
Ping {
/// Server address (host:port).
@@ -54,9 +83,6 @@ enum Command {
},
/// Generate a fresh MLS KeyPackage and upload it to the Authentication Service.
///
/// Prints the SHA-256 fingerprint of the uploaded package and the raw
/// Ed25519 identity public key bytes (hex), which peers need to fetch it.
Register {
/// Server address (host:port).
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
@@ -64,9 +90,6 @@ enum Command {
},
/// Fetch a peer's KeyPackage from the Authentication Service.
///
/// IDENTITY_KEY is the peer's Ed25519 public key encoded as 64 lowercase
/// hex characters (32 bytes).
FetchKey {
/// Server address (host:port).
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
@@ -76,7 +99,7 @@ enum Command {
identity_key: String,
},
/// Run a full AliceBob MLS round-trip against live AS and DS endpoints.
/// Run a full Alice/Bob MLS round-trip against live AS and DS endpoints.
DemoGroup {
/// Server address (host:port).
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
@@ -140,7 +163,7 @@ enum Command {
env = "QUICNPROTOCHAT_STATE"
)]
state: PathBuf,
#[arg(long, default_value = "127.0.0.1:4201", env = "QUICNPROTOCHAT_SERVER")]
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
server: String,
},
@@ -152,7 +175,7 @@ enum Command {
env = "QUICNPROTOCHAT_STATE"
)]
state: PathBuf,
#[arg(long, default_value = "127.0.0.1:4201", env = "QUICNPROTOCHAT_SERVER")]
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
server: String,
/// Recipient identity key (hex, 32 bytes -> 64 chars).
#[arg(long)]
@@ -170,7 +193,7 @@ enum Command {
env = "QUICNPROTOCHAT_STATE"
)]
state: PathBuf,
#[arg(long, default_value = "127.0.0.1:4201", env = "QUICNPROTOCHAT_SERVER")]
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
server: String,
/// Wait for up to this many milliseconds if no messages are queued.
@@ -196,11 +219,45 @@ async fn main() -> anyhow::Result<()> {
let args = Args::parse();
// Initialize auth context once for all RPCs.
// Initialize auth context once for all RPCs (empty token OK for register-user/login).
let auth_ctx = ClientAuth::from_parts(args.access_token.clone(), args.device_id.clone());
init_auth(auth_ctx);
let state_pw = args.state_password.as_deref();
match args.command {
Command::RegisterUser {
server,
username,
password,
} => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_register_user(
&server,
&args.ca_cert,
&args.server_name,
&username,
&password,
))
.await
}
Command::Login {
server,
username,
password,
} => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_login(
&server,
&args.ca_cert,
&args.server_name,
&username,
&password,
))
.await
}
Command::Ping { server } => cmd_ping(&server, &args.ca_cert, &args.server_name).await,
Command::Register { server } => {
let local = tokio::task::LocalSet::new();
@@ -236,6 +293,7 @@ async fn main() -> anyhow::Result<()> {
&server,
&args.ca_cert,
&args.server_name,
state_pw,
))
.await
}
@@ -246,7 +304,7 @@ async fn main() -> anyhow::Result<()> {
} => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_create_group(&state, &server, &group_id))
.run_until(cmd_create_group(&state, &server, &group_id, state_pw))
.await
}
Command::Invite {
@@ -262,13 +320,14 @@ async fn main() -> anyhow::Result<()> {
&args.ca_cert,
&args.server_name,
&peer_key,
state_pw,
))
.await
}
Command::Join { state, server } => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_join(&state, &server, &args.ca_cert, &args.server_name))
.run_until(cmd_join(&state, &server, &args.ca_cert, &args.server_name, state_pw))
.await
}
Command::Send {
@@ -286,6 +345,7 @@ async fn main() -> anyhow::Result<()> {
&args.server_name,
&peer_key,
&msg,
state_pw,
))
.await
}
@@ -304,6 +364,7 @@ async fn main() -> anyhow::Result<()> {
&args.server_name,
wait_ms,
stream,
state_pw,
))
.await
}

View File

@@ -93,6 +93,7 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
&server,
&ca_cert,
"localhost",
None,
))
.await?;
@@ -102,6 +103,7 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
&server,
&ca_cert,
"localhost",
None,
))
.await?;
@@ -110,6 +112,7 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
&alice_state,
&server,
"test-group",
None,
))
.await?;
@@ -126,6 +129,7 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
&ca_cert,
"localhost",
&bob_pk_hex,
None,
))
.await?;
@@ -135,6 +139,7 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
&server,
&ca_cert,
"localhost",
None,
))
.await?;
@@ -147,6 +152,7 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
"localhost",
&bob_pk_hex,
"hello bob",
None,
))
.await?;

View File

@@ -20,6 +20,7 @@ ml-kem = { workspace = true }
# Crypto — OPAQUE password-authenticated key exchange
opaque-ke = { workspace = true }
argon2 = { workspace = true }
# Crypto — MLS (M2)
openmls = { workspace = true }

View File

@@ -361,6 +361,21 @@ impl GroupMember {
self.group.as_ref()
}
/// Return the identity (credential) bytes of all current group members.
///
/// Each entry is the raw credential payload (Ed25519 public key bytes)
/// extracted from the member's MLS leaf node.
pub fn member_identities(&self) -> Vec<Vec<u8>> {
    // No active group means no members to report.
    self.group
        .as_ref()
        .map(|g| {
            g.members()
                .map(|m| m.credential.identity().to_vec())
                .collect()
        })
        .unwrap_or_default()
}
// ── Private helpers ───────────────────────────────────────────────────────
fn make_credential_with_key(&self) -> Result<CredentialWithKey, CoreError> {

View File

@@ -28,7 +28,7 @@ use ml_kem::{
kem::{Decapsulate, Encapsulate},
EncodedSizeUser, KemCore, MlKem768, MlKem768Params,
};
use rand::rngs::OsRng;
use rand::{rngs::OsRng, RngCore};
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use x25519_dalek::{EphemeralSecret, PublicKey as X25519Public, StaticSecret};
@@ -92,10 +92,13 @@ pub struct HybridKeypair {
}
/// Serialisable form of a [`HybridKeypair`] for persistence.
///
/// Secret fields are wrapped in [`Zeroizing`] so they are securely erased
/// when the struct is dropped.
#[derive(Serialize, Deserialize)]
pub struct HybridKeypairBytes {
pub x25519_sk: [u8; 32],
pub mlkem_dk: Vec<u8>,
pub x25519_sk: Zeroizing<[u8; 32]>,
pub mlkem_dk: Zeroizing<Vec<u8>>,
pub mlkem_ek: Vec<u8>,
}
@@ -123,7 +126,7 @@ impl HybridKeypair {
/// Reconstruct from serialised bytes.
pub fn from_bytes(bytes: &HybridKeypairBytes) -> Result<Self, HybridKemError> {
let x25519_sk = StaticSecret::from(bytes.x25519_sk);
let x25519_sk = StaticSecret::from(*bytes.x25519_sk);
let x25519_pk = X25519Public::from(&x25519_sk);
let mlkem_dk_arr = Array::try_from(bytes.mlkem_dk.as_slice())
@@ -145,8 +148,8 @@ impl HybridKeypair {
/// Serialise the keypair for persistence.
pub fn to_bytes(&self) -> HybridKeypairBytes {
HybridKeypairBytes {
x25519_sk: self.x25519_sk.to_bytes(),
mlkem_dk: self.mlkem_dk.as_bytes().to_vec(),
x25519_sk: Zeroizing::new(self.x25519_sk.to_bytes()),
mlkem_dk: Zeroizing::new(self.mlkem_dk.as_bytes().to_vec()),
mlkem_ek: self.mlkem_ek.as_bytes().to_vec(),
}
}
@@ -207,9 +210,13 @@ pub fn hybrid_encrypt(
.encapsulate(&mut OsRng)
.map_err(|_| HybridKemError::EncryptionFailed)?;
// 3. Combine shared secrets via HKDF
let (aead_key, aead_nonce) =
derive_aead_material(x25519_ss.as_bytes(), mlkem_ss.as_slice());
// 3. Derive AEAD key from combined shared secrets
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice());
// Generate a random 12-byte nonce (not derived from HKDF).
let mut nonce_bytes = [0u8; 12];
OsRng.fill_bytes(&mut nonce_bytes);
let aead_nonce = *Nonce::from_slice(&nonce_bytes);
// 4. AEAD encrypt
let cipher = ChaCha20Poly1305::new(&aead_key);
@@ -275,7 +282,7 @@ pub fn hybrid_decrypt(
.map_err(|_| HybridKemError::MlKemDecapsFailed)?;
// 3. Derive AEAD key
let (aead_key, _) = derive_aead_material(x25519_ss.as_bytes(), mlkem_ss.as_slice());
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice());
// 4. Decrypt
let cipher = ChaCha20Poly1305::new(&aead_key);
@@ -286,11 +293,12 @@ pub fn hybrid_decrypt(
Ok(plaintext)
}
/// Derive AEAD key + nonce from the combined X25519 + ML-KEM shared secrets.
fn derive_aead_material(
x25519_ss: &[u8],
mlkem_ss: &[u8],
) -> (Key, Nonce) {
/// Derive AEAD key from the combined X25519 + ML-KEM shared secrets.
///
/// The nonce is generated randomly per-encryption rather than derived from
/// HKDF, preventing nonce reuse when the same shared secret is (accidentally)
/// used more than once.
fn derive_aead_key(x25519_ss: &[u8], mlkem_ss: &[u8]) -> Key {
let mut ikm = Zeroizing::new(vec![0u8; x25519_ss.len() + mlkem_ss.len()]);
ikm[..x25519_ss.len()].copy_from_slice(x25519_ss);
ikm[x25519_ss.len()..].copy_from_slice(mlkem_ss);
@@ -301,11 +309,7 @@ fn derive_aead_material(
hk.expand(HKDF_INFO, &mut *key_bytes)
.expect("32 bytes is valid HKDF-SHA256 output length");
let mut nonce_bytes = [0u8; 12];
hk.expand(b"quicnprotochat-hybrid-nonce-v1", &mut nonce_bytes)
.expect("12 bytes is valid HKDF-SHA256 output length");
(*Key::from_slice(&*key_bytes), *Nonce::from_slice(&nonce_bytes))
*Key::from_slice(&*key_bytes)
}
// ── Tests ───────────────────────────────────────────────────────────────────

View File

@@ -9,7 +9,7 @@ use opaque_ke::CipherSuite;
///
/// - **OPRF**: Ristretto255 (curve25519-based, ~128-bit security)
/// - **Key exchange**: Triple-DH (3DH) over Ristretto255 with SHA-512
/// - **KSF**: Identity (no key stretching; upgrade to Argon2 later)
/// - **KSF**: Argon2id (memory-hard key stretching)
pub struct OpaqueSuite;
impl CipherSuite for OpaqueSuite {
@@ -18,5 +18,5 @@ impl CipherSuite for OpaqueSuite {
opaque_ke::Ristretto255,
sha2::Sha512,
>;
type Ksf = opaque_ke::ksf::Identity;
type Ksf = argon2::Argon2<'static>;
}

View File

@@ -26,10 +26,6 @@ fn main() {
let schemas_dir = workspace_root.join("schemas");
// Re-run this build script whenever any schema file changes.
println!(
"cargo:rerun-if-changed={}",
schemas_dir.join("envelope.capnp").display()
);
println!(
"cargo:rerun-if-changed={}",
schemas_dir.join("auth.capnp").display()
@@ -47,7 +43,6 @@ fn main() {
// Treat `schemas/` as the include root so that inter-schema imports
// resolve correctly.
.src_prefix(&schemas_dir)
.file(schemas_dir.join("envelope.capnp"))
.file(schemas_dir.join("auth.capnp"))
.file(schemas_dir.join("delivery.capnp"))
.file(schemas_dir.join("node.capnp"))

View File

@@ -11,22 +11,9 @@
//!
//! `build.rs` invokes `capnpc` at compile time and writes generated Rust source
//! into `$OUT_DIR`. The `include!` macros below splice that code in as a module.
//!
//! # Canonical serialisation (M2+)
//!
//! `build_envelope` uses standard Cap'n Proto wire format. Canonical serialisation
//! (deterministic byte representation for cryptographic signing of KeyPackages and
//! Commits) is added in M2 once the Authentication Service is introduced.
// ── Generated types ───────────────────────────────────────────────────────────
/// Cap'n Proto generated types for `schemas/envelope.capnp`.
///
/// Do not edit this module by hand — it is entirely machine-generated.
pub mod envelope_capnp {
include!(concat!(env!("OUT_DIR"), "/envelope_capnp.rs"));
}
/// Cap'n Proto generated types for `schemas/auth.capnp`.
///
/// Do not edit this module by hand — it is entirely machine-generated.
@@ -48,95 +35,6 @@ pub mod node_capnp {
include!(concat!(env!("OUT_DIR"), "/node_capnp.rs"));
}
// ── Re-exports ────────────────────────────────────────────────────────────────
/// The message-type discriminant from the `Envelope` schema.
///
/// Re-exported here so callers can `use quicnprotochat_proto::MsgType` without
/// spelling out the full generated module path.
pub use envelope_capnp::envelope::MsgType;
// ── Owned envelope type ───────────────────────────────────────────────────────
/// An owned, decoded `Envelope` with no Cap'n Proto reader lifetimes.
///
/// All byte fields are eagerly copied out of the Cap'n Proto reader so that
/// this type is `Send + 'static` and can cross async task boundaries freely.
///
/// # Invariants
///
/// - `group_id` and `sender_id` are either empty (for control messages such as
/// `Ping`/`Pong`) or exactly 32 bytes (SHA-256 digest).
/// - `payload` is empty for `Ping` and `Pong`; non-empty for all MLS variants.
#[derive(Debug, Clone)]
pub struct ParsedEnvelope {
pub msg_type: MsgType,
/// SHA-256 of the group name, or empty for point-to-point control messages.
pub group_id: Vec<u8>,
/// SHA-256 of the sender's Ed25519 identity public key, or empty.
pub sender_id: Vec<u8>,
/// Opaque payload — interpretation is determined by `msg_type`.
pub payload: Vec<u8>,
/// Unix timestamp in milliseconds.
pub timestamp_ms: u64,
}
// ── Serialisation helpers ─────────────────────────────────────────────────────
/// Serialise a [`ParsedEnvelope`] to unpacked Cap'n Proto wire bytes.
///
/// The returned bytes include the Cap'n Proto segment table header followed by
/// the message data. They are suitable for use as the body of a length-prefixed
/// quicnprotochat frame (the frame codec in `quicnprotochat-core` prepends the 4-byte length).
///
/// # Errors
///
/// Returns [`capnp::Error`] if the underlying allocator fails (out of memory).
/// This is not expected under normal operation.
pub fn build_envelope(env: &ParsedEnvelope) -> Result<Vec<u8>, capnp::Error> {
use capnp::message;
let mut message = message::Builder::new_default();
{
let mut root = message.init_root::<envelope_capnp::envelope::Builder>();
root.set_msg_type(env.msg_type);
root.set_group_id(&env.group_id);
root.set_sender_id(&env.sender_id);
root.set_payload(&env.payload);
root.set_timestamp_ms(env.timestamp_ms);
}
to_bytes(&message)
}
/// Deserialise unpacked Cap'n Proto wire bytes into a [`ParsedEnvelope`].
///
/// All data is copied out of the Cap'n Proto reader before returning, so the
/// input slice is not retained.
///
/// # Errors
///
/// - [`capnp::Error`] if the bytes are not valid Cap'n Proto wire format.
/// - [`capnp::Error`] if `msgType` contains a discriminant not present in the
/// current schema (forward-compatibility guard).
pub fn parse_envelope(bytes: &[u8]) -> Result<ParsedEnvelope, capnp::Error> {
let reader = from_bytes(bytes)?;
let root = reader.get_root::<envelope_capnp::envelope::Reader>()?;
let msg_type = root.get_msg_type().map_err(|nis| {
capnp::Error::failed(format!(
"Envelope.msgType contains unknown discriminant: {nis}"
))
})?;
Ok(ParsedEnvelope {
msg_type,
group_id: root.get_group_id()?.to_vec(),
sender_id: root.get_sender_id()?.to_vec(),
payload: root.get_payload()?.to_vec(),
timestamp_ms: root.get_timestamp_ms(),
})
}
// ── Low-level byte ↔ message conversions ──────────────────────────────────────
/// Serialise a Cap'n Proto message builder to unpacked wire bytes.
@@ -162,57 +60,3 @@ pub fn from_bytes(
let mut cursor = std::io::Cursor::new(bytes);
capnp::serialize::read_message(&mut cursor, capnp::message::ReaderOptions::new())
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
use super::*;
/// Round-trip a Ping envelope through build → parse and verify all fields.
#[test]
fn ping_round_trip() {
let original = ParsedEnvelope {
msg_type: MsgType::Ping,
group_id: vec![],
sender_id: vec![0xAB; 32],
payload: vec![],
timestamp_ms: 1_700_000_000_000,
};
let bytes = build_envelope(&original).expect("build_envelope failed");
let parsed = parse_envelope(&bytes).expect("parse_envelope failed");
assert!(matches!(parsed.msg_type, MsgType::Ping));
assert_eq!(parsed.group_id, original.group_id);
assert_eq!(parsed.sender_id, original.sender_id);
assert_eq!(parsed.payload, original.payload);
assert_eq!(parsed.timestamp_ms, original.timestamp_ms);
}
/// Round-trip a Pong envelope.
#[test]
fn pong_round_trip() {
let original = ParsedEnvelope {
msg_type: MsgType::Pong,
group_id: vec![],
sender_id: vec![0xCD; 32],
payload: vec![],
timestamp_ms: 1_700_000_001_000,
};
let bytes = build_envelope(&original).expect("build_envelope failed");
let parsed = parse_envelope(&bytes).expect("parse_envelope failed");
assert!(matches!(parsed.msg_type, MsgType::Pong));
assert_eq!(parsed.sender_id, original.sender_id);
assert_eq!(parsed.timestamp_ms, original.timestamp_ms);
}
/// Corrupted bytes must produce an error, not a panic.
#[test]
fn corrupted_bytes_error() {
let result = parse_envelope(&[0xFF, 0xFF, 0xFF, 0xFF]);
assert!(result.is_err(), "expected error for corrupted input");
}
}

View File

@@ -35,6 +35,7 @@ rcgen = { workspace = true }
# Crypto — OPAQUE PAKE
opaque-ke = { workspace = true }
rand = { workspace = true }
subtle = { workspace = true }
# Database
rusqlite = { workspace = true }

View File

@@ -0,0 +1,30 @@
//! Structured error codes for server RPC responses.
//!
//! Every `capnp::Error::failed()` message is prefixed with a stable code
//! (E001–E020) so clients can match on the code without parsing free-text.
//! Codes are append-only: never renumber or reuse a retired code.

// ── Auth / session errors ──────────────────────────────────────────────────
pub const E001_BAD_AUTH_VERSION: &str = "E001";
pub const E002_EMPTY_TOKEN: &str = "E002";
pub const E003_INVALID_TOKEN: &str = "E003";
pub const E004_IDENTITY_KEY_LENGTH: &str = "E004";
// ── Payload / input validation ─────────────────────────────────────────────
pub const E005_PAYLOAD_EMPTY: &str = "E005";
pub const E006_PAYLOAD_TOO_LARGE: &str = "E006";
pub const E007_PACKAGE_EMPTY: &str = "E007";
pub const E008_PACKAGE_TOO_LARGE: &str = "E008";
// ── Server-side failures ───────────────────────────────────────────────────
pub const E009_STORAGE_ERROR: &str = "E009";
pub const E010_OPAQUE_ERROR: &str = "E010";
pub const E011_USERNAME_EMPTY: &str = "E011";
pub const E012_WIRE_VERSION: &str = "E012";
pub const E013_HYBRID_KEY_EMPTY: &str = "E013";
// ── Resource limits (Fixes 6/7) ────────────────────────────────────────────
pub const E014_RATE_LIMITED: &str = "E014";
pub const E015_QUEUE_FULL: &str = "E015";
// ── Identity / login state (Fixes 2/3/4/5) ─────────────────────────────────
pub const E016_IDENTITY_MISMATCH: &str = "E016";
pub const E017_SESSION_EXPIRED: &str = "E017";
pub const E018_USER_EXISTS: &str = "E018";
pub const E019_NO_PENDING_LOGIN: &str = "E019";
pub const E020_BAD_PARAMS: &str = "E020";
/// Build a `capnp::Error::failed()` with the structured code prefix.
pub fn coded_error(code: &str, msg: impl std::fmt::Display) -> capnp::Error {
capnp::Error::failed(format!("{code}: {msg}"))
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,8 +1,4 @@
//! SQLCipher-backed persistent storage.
//!
//! Uses `rusqlite` with `bundled-sqlcipher` for encrypted-at-rest storage.
//! Implements the same [`Store`] trait as [`FileBackedStore`] but with proper
//! ACID transactions and indexed queries.
use std::path::Path;
use std::sync::Mutex;
@@ -12,18 +8,11 @@ use rusqlite::{params, Connection};
use crate::storage::{StorageError, Store};
/// SQLCipher-encrypted storage backend.
///
/// All data is stored in a single encrypted SQLite database. The encryption
/// key is set via `PRAGMA key` at open time.
pub struct SqlStore {
conn: Mutex<Connection>,
}
impl SqlStore {
/// Open (or create) an encrypted database at `path`.
///
/// `key` is the passphrase used by SQLCipher. Pass an empty string for an
/// unencrypted database (useful for testing).
pub fn open(path: impl AsRef<Path>, key: &str) -> Result<Self, StorageError> {
let conn = Connection::open(path).map_err(|e| StorageError::Db(e.to_string()))?;
@@ -32,7 +21,6 @@ impl SqlStore {
.map_err(|e| StorageError::Db(format!("PRAGMA key failed: {e}")))?;
}
// Performance pragmas — safe for a single-writer server.
conn.execute_batch(
"PRAGMA journal_mode = WAL;
PRAGMA synchronous = NORMAL;
@@ -47,7 +35,6 @@ impl SqlStore {
Ok(store)
}
/// Create schema tables if they don't exist yet.
fn migrate(&self) -> Result<(), StorageError> {
let conn = self.conn.lock().unwrap();
conn.execute_batch(
@@ -86,6 +73,17 @@ impl SqlStore {
username TEXT PRIMARY KEY,
opaque_record BLOB NOT NULL,
created_at INTEGER DEFAULT (strftime('%s','now'))
);
CREATE TABLE IF NOT EXISTS user_identity_keys (
username TEXT PRIMARY KEY,
identity_key BLOB NOT NULL
);
CREATE TABLE IF NOT EXISTS endpoints (
identity_key BLOB PRIMARY KEY,
node_addr BLOB NOT NULL,
updated_at INTEGER DEFAULT (strftime('%s','now'))
);",
)
.map_err(|e| StorageError::Db(e.to_string()))?;
@@ -111,7 +109,6 @@ impl Store for SqlStore {
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
let conn = self.conn.lock().unwrap();
// Find the oldest KeyPackage (FIFO) and delete it atomically.
let mut stmt = conn
.prepare(
"SELECT id, package_data FROM key_packages
@@ -178,7 +175,6 @@ impl Store for SqlStore {
if !rows.is_empty() {
let ids: Vec<i64> = rows.iter().map(|(id, _)| *id).collect();
// Delete fetched rows in a single statement.
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
let params: Vec<&dyn rusqlite::types::ToSql> =
@@ -190,6 +186,76 @@ impl Store for SqlStore {
Ok(rows.into_iter().map(|(_, payload)| payload).collect())
}
/// Fetch and remove up to `limit` messages for (recipient, channel) (Fix 8).
///
/// Messages are returned oldest-first (FIFO — rowid `id` is monotonically
/// increasing on insert). Rows are deleted only after a successful SELECT,
/// so a crash between the two statements re-delivers messages on the next
/// fetch: at-least-once semantics. The per-store `Mutex` serialises the
/// SELECT+DELETE pair against concurrent callers.
///
/// NOTE(review): `limit == 0` yields `LIMIT 0` here and returns nothing —
/// the "0 = fetch all" convention from the RPC schema is presumably mapped
/// to `fetch()` by the caller; confirm at the RPC layer.
fn fetch_limited(
    &self,
    recipient_key: &[u8],
    channel_id: &[u8],
    limit: usize,
) -> Result<Vec<Vec<u8>>, StorageError> {
    let conn = self.conn.lock().unwrap();
    let mut stmt = conn
        .prepare(
            "SELECT id, payload FROM deliveries
             WHERE recipient_key = ?1 AND channel_id = ?2
             ORDER BY id ASC
             LIMIT ?3",
        )
        .map_err(|e| StorageError::Db(e.to_string()))?;
    let rows: Vec<(i64, Vec<u8>)> = stmt
        .query_map(params![recipient_key, channel_id, limit as i64], |row| {
            Ok((row.get(0)?, row.get(1)?))
        })
        .map_err(|e| StorageError::Db(e.to_string()))?
        .collect::<Result<Vec<_>, _>>()
        .map_err(|e| StorageError::Db(e.to_string()))?;
    if !rows.is_empty() {
        // Delete exactly the fetched rows in one statement; binding each id
        // as a parameter avoids any string-interpolation of values.
        let ids: Vec<i64> = rows.iter().map(|(id, _)| *id).collect();
        let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
        let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
        let params: Vec<&dyn rusqlite::types::ToSql> =
            ids.iter().map(|id| id as &dyn rusqlite::types::ToSql).collect();
        conn.execute(&sql, params.as_slice())
            .map_err(|e| StorageError::Db(e.to_string()))?;
    }
    Ok(rows.into_iter().map(|(_, payload)| payload).collect())
}
/// Count the messages currently queued for (recipient, channel) (Fix 7).
///
/// Used by the enqueue path to enforce the per-queue depth cap.
fn queue_depth(
    &self,
    recipient_key: &[u8],
    channel_id: &[u8],
) -> Result<usize, StorageError> {
    let guard = self.conn.lock().unwrap();
    let depth = guard
        .query_row(
            "SELECT COUNT(*) FROM deliveries WHERE recipient_key = ?1 AND channel_id = ?2",
            params![recipient_key, channel_id],
            |row| row.get::<_, i64>(0),
        )
        .map_err(|e| StorageError::Db(e.to_string()))?;
    Ok(depth as usize)
}
/// Delete queued messages older than `max_age_secs`; returns rows removed (Fix 7).
///
/// The cutoff is derived from the current wall clock. `saturating_sub`
/// guards against underflow if the configured TTL exceeds the current
/// Unix time; `created_at` is the integer-seconds column populated by
/// SQLite's `strftime('%s','now')` default.
fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError> {
    use std::time::{SystemTime, UNIX_EPOCH};
    let now_secs = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs();
    let cutoff = now_secs.saturating_sub(max_age_secs) as i64;
    let conn = self.conn.lock().unwrap();
    conn.execute(
        "DELETE FROM deliveries WHERE created_at < ?1",
        params![cutoff],
    )
    .map_err(|e| StorageError::Db(e.to_string()))
}
fn upload_hybrid_key(
&self,
identity_key: &[u8],
@@ -256,6 +322,68 @@ impl Store for SqlStore {
.optional()
.map_err(|e| StorageError::Db(e.to_string()))
}
/// Check whether an OPAQUE registration already exists for `username` (Fix 5).
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
    let conn = self.conn.lock().unwrap();
    conn.query_row(
        "SELECT EXISTS(SELECT 1 FROM users WHERE username = ?1)",
        params![username],
        |row| row.get::<_, bool>(0),
    )
    .map_err(|e| StorageError::Db(e.to_string()))
}
/// Bind `identity_key` to `username` (Fix 2); replaces any previous binding.
fn store_user_identity_key(
    &self,
    username: &str,
    identity_key: Vec<u8>,
) -> Result<(), StorageError> {
    let conn = self.conn.lock().unwrap();
    // Upsert: the username is the primary key, so REPLACE overwrites.
    conn.execute(
        "INSERT OR REPLACE INTO user_identity_keys (username, identity_key) VALUES (?1, ?2)",
        params![username, identity_key],
    )
    .map(|_| ())
    .map_err(|e| StorageError::Db(e.to_string()))
}
/// Look up the identity key bound to `username`, if any (Fix 2).
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
    let conn = self.conn.lock().unwrap();
    conn.query_row(
        "SELECT identity_key FROM user_identity_keys WHERE username = ?1",
        params![username],
        |row| row.get(0),
    )
    .optional()
    .map_err(|e| StorageError::Db(e.to_string()))
}
/// Publish (upsert) a peer's serialized P2P endpoint address.
fn publish_endpoint(
    &self,
    identity_key: &[u8],
    node_addr: Vec<u8>,
) -> Result<(), StorageError> {
    let conn = self.conn.lock().unwrap();
    // identity_key is the primary key, so REPLACE updates in place.
    conn.execute(
        "INSERT OR REPLACE INTO endpoints (identity_key, node_addr) VALUES (?1, ?2)",
        params![identity_key, node_addr],
    )
    .map(|_| ())
    .map_err(|e| StorageError::Db(e.to_string()))
}
/// Resolve a peer's published P2P endpoint address, if one exists.
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
    let conn = self.conn.lock().unwrap();
    conn.query_row(
        "SELECT node_addr FROM endpoints WHERE identity_key = ?1",
        params![identity_key],
        |row| row.get(0),
    )
    .optional()
    .map_err(|e| StorageError::Db(e.to_string()))
}
}
/// Convenience extension for `rusqlite::OptionalExtension`.
@@ -284,10 +412,8 @@ mod tests {
#[test]
fn key_package_fifo() {
let store = open_in_memory();
let ik = b"alice_identity_key__32bytes_long";
// Pad to 32 bytes to match real usage
let mut identity = [0u8; 32];
identity[..ik.len()].copy_from_slice(ik);
identity[..31].copy_from_slice(b"alice_identity_key__32bytes_lon");
store
.upload_key_package(&identity, b"kp1".to_vec())
@@ -319,10 +445,55 @@ mod tests {
let msgs = store.fetch(&rk, ch).unwrap();
assert_eq!(msgs, vec![b"msg1".to_vec(), b"msg2".to_vec()]);
// Queue is drained.
assert!(store.fetch(&rk, ch).unwrap().is_empty());
}
#[test]
fn fetch_limited_partial_drain() {
    let db = open_in_memory();
    let recipient = [5u8; 32];
    let channel = b"ch";
    for payload in [&b"a"[..], b"b", b"c"] {
        db.enqueue(&recipient, channel, payload.to_vec()).unwrap();
    }
    // The first two messages come out in FIFO order…
    assert_eq!(
        db.fetch_limited(&recipient, channel, 2).unwrap(),
        vec![b"a".to_vec(), b"b".to_vec()]
    );
    // …and the third is still queued for a later fetch.
    assert_eq!(db.fetch(&recipient, channel).unwrap(), vec![b"c".to_vec()]);
}
#[test]
fn queue_depth_count() {
    let db = open_in_memory();
    let recipient = [6u8; 32];
    let channel = b"ch";
    // Empty queue reports zero before anything is enqueued.
    assert_eq!(db.queue_depth(&recipient, channel).unwrap(), 0);
    for payload in [&b"x"[..], b"y"] {
        db.enqueue(&recipient, channel, payload.to_vec()).unwrap();
    }
    assert_eq!(db.queue_depth(&recipient, channel).unwrap(), 2);
}
#[test]
fn has_user_record_check() {
    let db = open_in_memory();
    // No registration yet.
    assert!(!db.has_user_record("alice").unwrap());
    db.store_user_record("alice", b"record".to_vec()).unwrap();
    // Registration is visible for alice but not for an unrelated user.
    assert!(db.has_user_record("alice").unwrap());
    assert!(!db.has_user_record("bob").unwrap());
}
#[test]
fn user_identity_key_round_trip() {
    let db = open_in_memory();
    // Absent before store.
    assert!(db.get_user_identity_key("alice").unwrap().is_none());
    let key = vec![1u8; 32];
    db.store_user_identity_key("alice", key.clone()).unwrap();
    assert_eq!(db.get_user_identity_key("alice").unwrap(), Some(key));
}
#[test]
fn hybrid_key_round_trip() {
let store = open_in_memory();
@@ -333,24 +504,6 @@ mod tests {
assert_eq!(store.fetch_hybrid_key(&ik).unwrap(), Some(pk));
}
#[test]
fn hybrid_key_upsert() {
let store = open_in_memory();
let ik = [3u8; 32];
store
.upload_hybrid_key(&ik, b"v1".to_vec())
.unwrap();
store
.upload_hybrid_key(&ik, b"v2".to_vec())
.unwrap();
assert_eq!(
store.fetch_hybrid_key(&ik).unwrap(),
Some(b"v2".to_vec())
);
}
#[test]
fn separate_channels_isolated() {
let store = open_in_memory();

View File

@@ -43,6 +43,24 @@ pub trait Store: Send + Sync {
channel_id: &[u8],
) -> Result<Vec<Vec<u8>>, StorageError>;
/// Fetch up to `limit` messages without draining the entire queue (Fix 8).
fn fetch_limited(
&self,
recipient_key: &[u8],
channel_id: &[u8],
limit: usize,
) -> Result<Vec<Vec<u8>>, StorageError>;
/// Return the number of queued messages for (recipient, channel) (Fix 7).
fn queue_depth(
&self,
recipient_key: &[u8],
channel_id: &[u8],
) -> Result<usize, StorageError>;
/// Delete messages older than `max_age_secs`. Returns count deleted (Fix 7).
fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError>;
fn upload_hybrid_key(
&self,
identity_key: &[u8],
@@ -62,6 +80,29 @@ pub trait Store: Send + Sync {
/// Retrieve an OPAQUE user record by username.
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;
/// Check if a user record already exists (Fix 5).
fn has_user_record(&self, username: &str) -> Result<bool, StorageError>;
/// Store identity key for a user (Fix 2).
fn store_user_identity_key(
&self,
username: &str,
identity_key: Vec<u8>,
) -> Result<(), StorageError>;
/// Retrieve identity key for a user (Fix 2).
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;
/// Publish a P2P endpoint address for an identity key.
fn publish_endpoint(
&self,
identity_key: &[u8],
node_addr: Vec<u8>,
) -> Result<(), StorageError>;
/// Resolve a peer's P2P endpoint address.
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
}
// ── ChannelKey ───────────────────────────────────────────────────────────────
@@ -100,10 +141,13 @@ pub struct FileBackedStore {
hk_path: PathBuf,
setup_path: PathBuf,
users_path: PathBuf,
identity_keys_path: PathBuf,
key_packages: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
deliveries: Mutex<HashMap<ChannelKey, VecDeque<Vec<u8>>>>,
hybrid_keys: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
users: Mutex<HashMap<String, Vec<u8>>>,
identity_keys: Mutex<HashMap<String, Vec<u8>>>,
endpoints: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
}
impl FileBackedStore {
@@ -117,11 +161,13 @@ impl FileBackedStore {
let hk_path = dir.join("hybridkeys.bin");
let setup_path = dir.join("server_setup.bin");
let users_path = dir.join("users.bin");
let identity_keys_path = dir.join("identity_keys.bin");
let key_packages = Mutex::new(Self::load_kp_map(&kp_path)?);
let deliveries = Mutex::new(Self::load_delivery_map(&ds_path)?);
let hybrid_keys = Mutex::new(Self::load_hybrid_keys(&hk_path)?);
let users = Mutex::new(Self::load_users(&users_path)?);
let identity_keys = Mutex::new(Self::load_map_string_bytes(&identity_keys_path)?);
Ok(Self {
kp_path,
@@ -129,10 +175,13 @@ impl FileBackedStore {
hk_path,
setup_path,
users_path,
identity_keys_path,
key_packages,
deliveries,
hybrid_keys,
users,
identity_keys,
endpoints: Mutex::new(HashMap::new()),
})
}
@@ -245,6 +294,18 @@ impl FileBackedStore {
}
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
}
/// Load a `username -> bytes` map from disk.
///
/// Delegates to `load_users`: identity-key maps reuse the exact on-disk
/// serialisation format of the OPAQUE user-record map.
fn load_map_string_bytes(path: &Path) -> Result<HashMap<String, Vec<u8>>, StorageError> {
    Self::load_users(path)
}

/// Persist a `username -> bytes` map to disk (same format as `flush_users`).
fn flush_map_string_bytes(
    &self,
    path: &Path,
    map: &HashMap<String, Vec<u8>>,
) -> Result<(), StorageError> {
    self.flush_users(path, map)
}
}
impl Store for FileBackedStore {
@@ -302,6 +363,46 @@ impl Store for FileBackedStore {
Ok(messages)
}
/// Fetch and remove up to `limit` messages for (recipient, channel), FIFO (Fix 8).
///
/// Only rewrites the on-disk delivery map when at least one message was
/// actually drained; for an empty or absent queue the previous on-disk
/// state is already correct and rewriting it is pure I/O overhead.
fn fetch_limited(
    &self,
    recipient_key: &[u8],
    channel_id: &[u8],
    limit: usize,
) -> Result<Vec<Vec<u8>>, StorageError> {
    let mut map = self.deliveries.lock().unwrap();
    let key = ChannelKey {
        channel_id: channel_id.to_vec(),
        recipient_key: recipient_key.to_vec(),
    };
    let messages: Vec<Vec<u8>> = map
        .get_mut(&key)
        .map(|q| {
            // Drain at most `limit` from the front — oldest messages first.
            let count = limit.min(q.len());
            q.drain(..count).collect()
        })
        .unwrap_or_default();
    // Skip the flush when nothing changed.
    if !messages.is_empty() {
        self.flush_delivery_map(&self.ds_path, &*map)?;
    }
    Ok(messages)
}
/// Count the messages currently queued for (recipient, channel) (Fix 7).
fn queue_depth(
    &self,
    recipient_key: &[u8],
    channel_id: &[u8],
) -> Result<usize, StorageError> {
    let key = ChannelKey {
        channel_id: channel_id.to_vec(),
        recipient_key: recipient_key.to_vec(),
    };
    let guard = self.deliveries.lock().unwrap();
    let depth = guard.get(&key).map_or(0, |queue| queue.len());
    Ok(depth)
}
/// TTL-based garbage collection (Fix 7) — deliberate no-op for this backend.
///
/// `FileBackedStore` does not record a per-message enqueue timestamp, so
/// there is nothing to expire; it always reports zero deletions. Only the
/// SQL-backed store implements real message TTL.
fn gc_expired_messages(&self, _max_age_secs: u64) -> Result<usize, StorageError> {
    // FileBackedStore does not track timestamps per message — no-op.
    Ok(0)
}
fn upload_hybrid_key(
&self,
identity_key: &[u8],
@@ -345,4 +446,39 @@ impl Store for FileBackedStore {
let map = self.users.lock().unwrap();
Ok(map.get(username).cloned())
}
/// Check whether an OPAQUE registration already exists for `username` (Fix 5).
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
    let users = self.users.lock().unwrap();
    Ok(users.contains_key(username))
}
/// Bind `identity_key` to `username` (Fix 2) and persist immediately.
fn store_user_identity_key(
    &self,
    username: &str,
    identity_key: Vec<u8>,
) -> Result<(), StorageError> {
    let mut keys = self.identity_keys.lock().unwrap();
    keys.insert(username.to_owned(), identity_key);
    // Flush while still holding the lock so the file matches the map.
    self.flush_map_string_bytes(&self.identity_keys_path, &keys)
}
/// Look up the identity key bound to `username`, if any (Fix 2).
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
    let keys = self.identity_keys.lock().unwrap();
    Ok(keys.get(username).map(|k| k.to_vec()))
}
/// Record a peer's serialized P2P endpoint address in memory.
///
/// NOTE(review): unlike the other maps in this store, `endpoints` is
/// initialised empty and never flushed to disk, so published endpoints are
/// lost on restart. This looks intentional (endpoint addresses are
/// presumably ephemeral and re-published by clients) — confirm against the
/// callers before relying on persistence here.
fn publish_endpoint(
    &self,
    identity_key: &[u8],
    node_addr: Vec<u8>,
) -> Result<(), StorageError> {
    let mut map = self.endpoints.lock().unwrap();
    map.insert(identity_key.to_vec(), node_addr);
    Ok(())
}
/// Resolve a peer's published (in-memory) P2P endpoint address, if any.
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
    let endpoints = self.endpoints.lock().unwrap();
    Ok(endpoints.get(identity_key).map(Vec::clone))
}
}

View File

@@ -1,52 +0,0 @@
# envelope.capnp — top-level wire message for all quicnprotochat traffic.
#
# Every frame exchanged over the Noise channel is serialised as an Envelope.
# The Delivery Service routes by (groupId, msgType) without inspecting payload.
#
# Field sizing rationale:
# groupId / senderId : 32 bytes — SHA-256 digest
# payload : opaque — MLS blob or control data; size bounded by
# the Noise transport max message size (65535 B)
# timestampMs : UInt64 — unix epoch milliseconds; sufficient until year 292M
#
# ID generated with: capnp id
@0xe4a7f2c8b1d63509;
struct Envelope {
# Message type discriminant — determines how payload is interpreted.
msgType @0 :MsgType;
# 32-byte SHA-256 digest of the group name.
# The Delivery Service uses this as its routing key.
# Zero-filled for point-to-point control messages (ping, keyPackageUpload, etc.).
groupId @1 :Data;
# 32-byte SHA-256 digest of the sender's Ed25519 identity public key.
senderId @2 :Data;
# Opaque payload. Interpretation is determined by msgType:
# ping / pong — empty
# keyPackageUpload — openmls-serialised KeyPackage blob
# keyPackageFetch — target identity key (32 bytes)
# keyPackageResponse — openmls-serialised KeyPackage blob (or empty if none)
# mlsWelcome — MLSMessage blob (Welcome variant)
# mlsCommit — MLSMessage blob (PublicMessage / Commit variant)
# mlsApplication — MLSMessage blob (PrivateMessage / Application variant)
# error — UTF-8 error description
payload @3 :Data;
# Unix timestamp in milliseconds at the time of send.
timestampMs @4 :UInt64;
enum MsgType {
ping @0;
pong @1;
keyPackageUpload @2;
keyPackageFetch @3;
keyPackageResponse @4;
mlsWelcome @5;
mlsCommit @6;
mlsApplication @7;
error @8;
}
}

View File

@@ -24,19 +24,21 @@ interface NodeService {
enqueue @2 (recipientKey :Data, payload :Data, channelId :Data, version :UInt16, auth :Auth) -> ();
# Fetch and drain all queued payloads for the recipient.
fetch @3 (recipientKey :Data, channelId :Data, version :UInt16, auth :Auth) -> (payloads :List(Data));
# limit: max number of messages to return (0 = fetch all).
fetch @3 (recipientKey :Data, channelId :Data, version :UInt16, auth :Auth, limit :UInt32) -> (payloads :List(Data));
# Long-poll: wait up to timeoutMs for new payloads, then drain queue.
fetchWait @4 (recipientKey :Data, channelId :Data, version :UInt16, timeoutMs :UInt64, auth :Auth) -> (payloads :List(Data));
# limit: max number of messages to return (0 = fetch all).
fetchWait @4 (recipientKey :Data, channelId :Data, version :UInt16, timeoutMs :UInt64, auth :Auth, limit :UInt32) -> (payloads :List(Data));
# Health probe for readiness/liveness.
health @5 () -> (status :Text);
# Upload the hybrid (X25519 + ML-KEM-768) public key for sealed envelope encryption.
uploadHybridKey @6 (identityKey :Data, hybridPublicKey :Data) -> ();
uploadHybridKey @6 (identityKey :Data, hybridPublicKey :Data, auth :Auth) -> ();
# Fetch a peer's hybrid public key (for post-quantum envelope encryption).
fetchHybridKey @7 (identityKey :Data) -> (hybridPublicKey :Data);
fetchHybridKey @7 (identityKey :Data, auth :Auth) -> (hybridPublicKey :Data);
# ── OPAQUE password-authenticated registration ──────────────────────────
@@ -44,7 +46,7 @@ interface NodeService {
opaqueRegisterStart @8 (username :Text, request :Data) -> (response :Data);
# Finish OPAQUE registration: client uploads sealed credential envelope.
opaqueRegisterFinish @9 (username :Text, upload :Data) -> (success :Bool);
opaqueRegisterFinish @9 (username :Text, upload :Data, identityKey :Data) -> (success :Bool);
# ── OPAQUE password-authenticated login ─────────────────────────────────
@@ -52,7 +54,16 @@ interface NodeService {
opaqueLoginStart @10 (username :Text, request :Data) -> (response :Data);
# Finish OPAQUE login: client sends credential finalization, receives session token.
opaqueLoginFinish @11 (username :Text, finalization :Data) -> (sessionToken :Data);
opaqueLoginFinish @11 (username :Text, finalization :Data, identityKey :Data) -> (sessionToken :Data);
# ── P2P endpoint discovery ────────────────────────────────────────────────
# Publish this node's iroh endpoint address for P2P connectivity.
# nodeAddr is the serialized iroh NodeAddr (JSON or custom encoding).
publishEndpoint @12 (identityKey :Data, nodeAddr :Data, auth :Auth) -> ();
# Resolve a peer's iroh endpoint for direct P2P connection.
resolveEndpoint @13 (identityKey :Data, auth :Auth) -> (nodeAddr :Data);
}
struct Auth {