feat: add protocol comparison docs, P2P crate, production audit, and design fixes

Add comprehensive documentation comparing quicnprotochat against classical
chat protocols (IRC+SSL, XMPP, Telegram) with diagrams and attack scenarios.
Promote comparison pages to top-level sidebar section. Include P2P transport
crate (iroh), production readiness audit, CI workflows, dependency policy,
and continued architecture improvements across all crates.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-22 12:15:44 +01:00
parent 0bdc222724
commit 00b0aa92a1
28 changed files with 1566 additions and 340 deletions

View File

@@ -46,7 +46,7 @@ pub struct ClientAuth {
impl ClientAuth {
/// Build a client auth context from optional token and device id.
/// Requires a non-empty token; we run version=1 only (no legacy mode).
/// Requires a non-empty token (auth version 1).
pub fn from_parts(access_token: String, device_id: Option<String>) -> Self {
let token = access_token.into_bytes();
let device = device_id.unwrap_or_default().into_bytes();
@@ -102,9 +102,8 @@ pub async fn cmd_register_user(
let node_client = connect_node(server, ca_cert, server_name).await?;
// OPAQUE registration step 1: client -> server.
let reg_start =
ClientRegistration::<OpaqueSuite>::start(&mut rng, password.as_bytes())
.map_err(|e| anyhow::anyhow!("OPAQUE register start: {e}"))?;
let reg_start = ClientRegistration::<OpaqueSuite>::start(&mut rng, password.as_bytes())
.map_err(|e| anyhow::anyhow!("OPAQUE register start: {e}"))?;
let mut req = node_client.opaque_register_start_request();
{
@@ -178,9 +177,8 @@ pub async fn cmd_login(
let node_client = connect_node(server, ca_cert, server_name).await?;
// OPAQUE login step 1: client -> server.
let login_start =
ClientLogin::<OpaqueSuite>::start(&mut rng, password.as_bytes())
.map_err(|e| anyhow::anyhow!("OPAQUE login start: {e}"))?;
let login_start = ClientLogin::<OpaqueSuite>::start(&mut rng, password.as_bytes())
.map_err(|e| anyhow::anyhow!("OPAQUE login start: {e}"))?;
let mut req = node_client.opaque_login_start_request();
{
@@ -234,7 +232,10 @@ pub async fn cmd_login(
.context("login_finish: missing session_token")?
.to_vec();
anyhow::ensure!(!session_token.is_empty(), "server returned empty session token");
anyhow::ensure!(
!session_token.is_empty(),
"server returned empty session token"
);
println!("login successful for '{username}'");
println!("session_token: {}", hex::encode(&session_token));
@@ -259,7 +260,7 @@ pub async fn cmd_register(server: &str, ca_cert: &Path, server_name: &str) -> an
p.set_identity_key(&identity.public_key_bytes());
p.set_package(&tls_bytes);
let mut auth = p.reborrow().init_auth();
set_auth(&mut auth);
set_auth(&mut auth)?;
}
let response = req
@@ -316,7 +317,7 @@ pub async fn cmd_register_state(
p.set_identity_key(&member.identity().public_key_bytes());
p.set_package(&tls_bytes);
let mut auth = p.reborrow().init_auth();
set_auth(&mut auth);
set_auth(&mut auth)?;
}
let response = req
@@ -381,7 +382,7 @@ pub async fn cmd_fetch_key(
let mut p = req.get();
p.set_identity_key(&identity_key);
let mut auth = p.reborrow().init_auth();
set_auth(&mut auth);
set_auth(&mut auth)?;
}
let response = req
@@ -487,8 +488,8 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
.context("Welcome was not delivered to Bob via DS")?;
// Bob unwraps the hybrid envelope and joins the group.
let welcome_bytes = hybrid_decrypt(&bob_hybrid, &raw_welcome)
.context("Bob: hybrid decrypt welcome failed")?;
let welcome_bytes =
hybrid_decrypt(&bob_hybrid, &raw_welcome).context("Bob: hybrid decrypt welcome failed")?;
bob.join_group(&welcome_bytes)
.context("Bob join_group failed")?;
@@ -496,8 +497,7 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
let ct_ab = alice
.send_message(b"hello bob")
.context("Alice send_message failed")?;
let wrapped_ab =
hybrid_encrypt(&bob_hybrid_pk, &ct_ab).context("hybrid encrypt Alice->Bob")?;
let wrapped_ab = hybrid_encrypt(&bob_hybrid_pk, &ct_ab).context("hybrid encrypt Alice->Bob")?;
enqueue(&alice_ds, &bob_id.public_key_bytes(), &wrapped_ab).await?;
let bob_msgs = fetch_all(&bob_ds, &bob_id.public_key_bytes()).await?;
@@ -528,8 +528,7 @@ pub async fn cmd_demo_group(server: &str, ca_cert: &Path, server_name: &str) ->
let raw_ba = alice_msgs
.first()
.context("Alice: missing Bob ciphertext from DS")?;
let inner_ba =
hybrid_decrypt(&alice_hybrid, raw_ba).context("Alice: hybrid decrypt failed")?;
let inner_ba = hybrid_decrypt(&alice_hybrid, raw_ba).context("Alice: hybrid decrypt failed")?;
let ba_plaintext = alice
.receive_message(&inner_ba)?
.context("Alice expected application message from Bob")?;
@@ -632,7 +631,11 @@ pub async fn cmd_invite(
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
println!(
"invited peer (welcome queued{}, commit sent to {} existing member(s))",
if peer_hybrid_pk.is_some() { ", hybrid-encrypted" } else { "" },
if peer_hybrid_pk.is_some() {
", hybrid-encrypted"
} else {
""
},
existing_members.len(),
);
Ok(())
@@ -663,8 +666,8 @@ pub async fn cmd_join(
.cloned()
.context("no Welcome found in DS for this identity")?;
// Try hybrid decryption first, fall back to raw MLS welcome.
let welcome_bytes = try_hybrid_unwrap(hybrid_kp.as_ref(), &raw_welcome);
let welcome_bytes = try_hybrid_decrypt(hybrid_kp.as_ref(), &raw_welcome)
.context("decrypt Welcome (hybrid required)")?;
member
.join_group(&welcome_bytes)
@@ -711,7 +714,11 @@ pub async fn cmd_send(
save_state(state_path, &member, hybrid_kp.as_ref(), password)?;
println!(
"message sent{}",
if peer_hybrid_pk.is_some() { " (hybrid-encrypted)" } else { "" }
if peer_hybrid_pk.is_some() {
" (hybrid-encrypted)"
} else {
""
}
);
Ok(())
}
@@ -745,8 +752,13 @@ pub async fn cmd_recv(
}
for (idx, payload) in payloads.iter().enumerate() {
// Try hybrid decryption, fall back to raw MLS payload.
let mls_payload = try_hybrid_unwrap(hybrid_kp.as_ref(), payload);
let mls_payload = match try_hybrid_decrypt(hybrid_kp.as_ref(), payload) {
Ok(b) => b,
Err(e) => {
println!("[{idx}] decrypt error: {e}");
continue;
}
};
match member.receive_message(&mls_payload) {
Ok(Some(pt)) => println!("[{idx}] plaintext: {}", String::from_utf8_lossy(&pt)),
@@ -791,7 +803,8 @@ pub async fn connect_node(
let crypto = QuicClientConfig::try_from(tls)
.map_err(|e| anyhow::anyhow!("invalid client TLS config: {e}"))?;
let mut endpoint = Endpoint::client("0.0.0.0:0".parse().unwrap())?;
let bind_addr: SocketAddr = "0.0.0.0:0".parse().context("parse client bind address")?;
let mut endpoint = Endpoint::client(bind_addr)?;
endpoint.set_default_client_config(ClientConfig::new(Arc::new(crypto)));
let connection = endpoint
@@ -829,7 +842,7 @@ pub async fn upload_key_package(
p.set_identity_key(identity_key);
p.set_package(package);
let mut auth = p.reborrow().init_auth();
set_auth(&mut auth);
set_auth(&mut auth)?;
}
let resp = req
@@ -860,7 +873,7 @@ pub async fn fetch_key_package(
let mut p = req.get();
p.set_identity_key(identity_key);
let mut auth = p.reborrow().init_auth();
set_auth(&mut auth);
set_auth(&mut auth)?;
}
let resp = req
@@ -893,7 +906,7 @@ pub async fn enqueue(
p.set_channel_id(&[]);
p.set_version(1);
let mut auth = p.reborrow().init_auth();
set_auth(&mut auth);
set_auth(&mut auth)?;
}
req.send().promise.await.context("enqueue RPC failed")?;
Ok(())
@@ -910,9 +923,9 @@ pub async fn fetch_all(
p.set_recipient_key(recipient_key);
p.set_channel_id(&[]);
p.set_version(1);
p.set_limit(0); // fetch all (backward compat)
p.set_limit(0); // fetch all
let mut auth = p.reborrow().init_auth();
set_auth(&mut auth);
set_auth(&mut auth)?;
}
let resp = req.send().promise.await.context("fetch RPC failed")?;
@@ -944,9 +957,9 @@ pub async fn fetch_wait(
p.set_timeout_ms(timeout_ms);
p.set_channel_id(&[]);
p.set_version(1);
p.set_limit(0); // fetch all (backward compat)
p.set_limit(0); // fetch all
let mut auth = p.reborrow().init_auth();
set_auth(&mut auth);
set_auth(&mut auth)?;
}
let resp = req.send().promise.await.context("fetch_wait RPC failed")?;
@@ -981,7 +994,7 @@ pub async fn upload_hybrid_key(
p.set_identity_key(identity_key);
p.set_hybrid_public_key(&hybrid_pk.to_bytes());
let mut auth = p.reborrow().init_auth();
set_auth(&mut auth);
set_auth(&mut auth)?;
}
req.send()
.promise
@@ -1002,7 +1015,7 @@ pub async fn fetch_hybrid_key(
let mut p = req.get();
p.set_identity_key(identity_key);
let mut auth = p.reborrow().init_auth();
set_auth(&mut auth);
set_auth(&mut auth)?;
}
let resp = req
@@ -1026,15 +1039,13 @@ pub async fn fetch_hybrid_key(
Ok(Some(pk))
}
/// Try to decrypt a hybrid envelope. If the payload is not a hybrid envelope or
/// decryption fails, return the original bytes unchanged (legacy plaintext MLS).
fn try_hybrid_unwrap(hybrid_kp: Option<&HybridKeypair>, payload: &[u8]) -> Vec<u8> {
if let Some(kp) = hybrid_kp {
if let Ok(inner) = hybrid_decrypt(kp, payload) {
return inner;
}
}
payload.to_vec()
/// Decrypt a hybrid envelope with the local hybrid keypair.
///
/// A hybrid key is mandatory: there is deliberately no fallback to treating
/// the payload as plaintext MLS. Returns the decrypted inner bytes, or an
/// error when the key is absent or decryption fails.
fn try_hybrid_decrypt(
    hybrid_kp: Option<&HybridKeypair>,
    payload: &[u8],
) -> anyhow::Result<Vec<u8>> {
    match hybrid_kp {
        // Surface the underlying HybridKemError as a plain anyhow error.
        Some(kp) => hybrid_decrypt(kp, payload).map_err(|e| anyhow::anyhow!("{e}")),
        None => Err(anyhow::anyhow!("hybrid key required for decryption")),
    }
}
fn sha256(bytes: &[u8]) -> Vec<u8> {
@@ -1042,20 +1053,21 @@ fn sha256(bytes: &[u8]) -> Vec<u8> {
Sha256::digest(bytes).to_vec()
}
fn set_auth(auth: &mut auth::Builder<'_>) {
let ctx = AUTH_CONTEXT
.get()
.expect("init_auth must be called with a non-empty token before RPCs");
fn set_auth(auth: &mut auth::Builder<'_>) -> anyhow::Result<()> {
let ctx = AUTH_CONTEXT.get().ok_or_else(|| {
anyhow::anyhow!("init_auth must be called with a non-empty token before RPCs")
})?;
auth.set_version(ctx.version);
auth.set_access_token(&ctx.access_token);
auth.set_device_id(&ctx.device_id);
Ok(())
}
#[derive(Serialize, Deserialize)]
struct StoredState {
identity_seed: [u8; 32],
group: Option<Vec<u8>>,
/// Post-quantum hybrid keypair (X25519 + ML-KEM-768). `None` for legacy state files.
/// Post-quantum hybrid keypair (X25519 + ML-KEM-768). `None` for state created before hybrid was added; generated on load if missing.
#[serde(default)]
hybrid_key: Option<HybridKeypairBytes>,
/// Cached member public keys for group participants (Fix 14 prep).
@@ -1081,10 +1093,7 @@ impl StoredState {
Ok((member, hybrid_kp))
}
fn from_parts(
member: &GroupMember,
hybrid_kp: Option<&HybridKeypair>,
) -> anyhow::Result<Self> {
fn from_parts(member: &GroupMember, hybrid_kp: Option<&HybridKeypair>) -> anyhow::Result<Self> {
let group = member
.group_ref()
.map(|g| bincode::serialize(g).context("serialize group"))
@@ -1166,7 +1175,7 @@ fn is_encrypted_state(bytes: &[u8]) -> bool {
fn load_or_init_state(path: &Path, password: Option<&str>) -> anyhow::Result<StoredState> {
if path.exists() {
let mut state = load_existing_state(path, password)?;
// Upgrade legacy state files: generate hybrid keypair if missing.
// Generate hybrid keypair if missing (upgrade from older state).
if state.hybrid_key.is_none() {
state.hybrid_key = Some(HybridKeypair::generate().to_bytes());
write_state(path, &state, password)?;
@@ -1187,9 +1196,8 @@ fn load_existing_state(path: &Path, password: Option<&str>) -> anyhow::Result<St
let bytes = std::fs::read(path).with_context(|| format!("read state file {path:?}"))?;
if is_encrypted_state(&bytes) {
let pw = password.context(
"state file is encrypted (QPCE); a password is required to decrypt it",
)?;
let pw = password
.context("state file is encrypted (QPCE); a password is required to decrypt it")?;
let plaintext = decrypt_state(pw, &bytes)?;
bincode::deserialize(&plaintext).context("decode encrypted state")
} else {

View File

@@ -6,8 +6,7 @@ use clap::{Parser, Subcommand};
use quicnprotochat_client::{
cmd_create_group, cmd_demo_group, cmd_fetch_key, cmd_invite, cmd_join, cmd_login, cmd_ping,
cmd_recv, cmd_register, cmd_register_state, cmd_register_user, cmd_send, ClientAuth,
init_auth,
cmd_recv, cmd_register, cmd_register_state, cmd_register_user, cmd_send, init_auth, ClientAuth,
};
// ── CLI ───────────────────────────────────────────────────────────────────────
@@ -35,7 +34,12 @@ struct Args {
/// Bearer token or OPAQUE session token for authenticated requests.
/// Not required for register-user and login commands.
#[arg(long, global = true, env = "QUICNPROTOCHAT_ACCESS_TOKEN", default_value = "")]
#[arg(
long,
global = true,
env = "QUICNPROTOCHAT_ACCESS_TOKEN",
default_value = ""
)]
access_token: String,
/// Optional device identifier (UUID bytes encoded as hex or raw string).
@@ -327,7 +331,13 @@ async fn main() -> anyhow::Result<()> {
Command::Join { state, server } => {
let local = tokio::task::LocalSet::new();
local
.run_until(cmd_join(&state, &server, &args.ca_cert, &args.server_name, state_pw))
.run_until(cmd_join(
&state,
&server,
&args.ca_cert,
&args.server_name,
state_pw,
))
.await
}
Command::Send {

View File

@@ -1,3 +1,6 @@
// cargo_bin! only works for current package's binary; we spawn quicnprotochat-server from another package.
#![allow(deprecated)]
use std::{path::PathBuf, process::Command, time::Duration};
use assert_cmd::cargo::cargo_bin;
@@ -5,9 +8,14 @@ use portpicker::pick_unused_port;
use tempfile::TempDir;
use tokio::time::sleep;
// Required by rustls 0.23 when QUIC/TLS is used from this process (e.g. client in test).
/// Install the `ring` crypto provider as the process-wide rustls default.
///
/// Idempotent by construction: `install_default` errors if a provider is
/// already installed, and that error is intentionally discarded.
fn ensure_rustls_provider() {
    let _ = rustls::crypto::ring::default_provider().install_default();
}
use quicnprotochat_client::{
cmd_create_group, cmd_invite, cmd_join, cmd_ping, cmd_register_state, cmd_send, ClientAuth,
connect_node, fetch_wait, init_auth,
cmd_create_group, cmd_invite, cmd_join, cmd_ping, cmd_register_state, cmd_send, connect_node,
fetch_wait, init_auth, ClientAuth,
};
use quicnprotochat_core::IdentityKeypair;
@@ -39,6 +47,8 @@ async fn wait_for_health(server: &str, ca_cert: &PathBuf, server_name: &str) ->
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
ensure_rustls_provider();
let temp = TempDir::new()?;
let base = temp.path();
let port = pick_unused_port().expect("free port");
@@ -51,7 +61,7 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
// Spawn server binary.
let server_bin = cargo_bin("quicnprotochat-server");
let mut child = Command::new(server_bin)
let child = Command::new(server_bin)
.arg("--listen")
.arg(&listen)
.arg("--data-dir")
@@ -108,12 +118,7 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
.await?;
local
.run_until(cmd_create_group(
&alice_state,
&server,
"test-group",
None,
))
.run_until(cmd_create_group(&alice_state, &server, "test-group", None))
.await?;
// Load Bob identity key from persisted state to use as peer key.
@@ -134,13 +139,7 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
.await?;
local
.run_until(cmd_join(
&bob_state,
&server,
&ca_cert,
"localhost",
None,
))
.run_until(cmd_join(&bob_state, &server, &ca_cert, "localhost", None))
.await?;
// Send Alice -> Bob.

View File

@@ -185,7 +185,7 @@ impl GroupMember {
/// group exists, or openmls fails.
pub fn add_member(
&mut self,
key_package_bytes: &[u8],
mut key_package_bytes: &[u8],
) -> Result<(Vec<u8>, Vec<u8>), CoreError> {
let group = self
.group
@@ -196,7 +196,7 @@ impl GroupMember {
// TlsSerialize; KeyPackageIn derives TlsDeserialize and provides validate()
// which verifies the signature and returns a trusted KeyPackage.
let key_package: KeyPackage =
KeyPackageIn::tls_deserialize(&mut key_package_bytes.as_ref())
KeyPackageIn::tls_deserialize(&mut key_package_bytes)
.map_err(|e| CoreError::Mls(format!("KeyPackage deserialise: {e:?}")))?
.validate(self.backend.crypto(), ProtocolVersion::Mls10)
.map_err(|e| CoreError::Mls(format!("KeyPackage validate: {e:?}")))?;
@@ -234,9 +234,9 @@ impl GroupMember {
/// KeyPackage, or openmls validation fails.
///
/// [`generate_key_package`]: Self::generate_key_package
pub fn join_group(&mut self, welcome_bytes: &[u8]) -> Result<(), CoreError> {
pub fn join_group(&mut self, mut welcome_bytes: &[u8]) -> Result<(), CoreError> {
// Deserialise MlsMessageIn, then extract the inner Welcome.
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut welcome_bytes.as_ref())
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut welcome_bytes)
.map_err(|e| CoreError::Mls(format!("Welcome deserialise: {e:?}")))?;
// into_welcome() is feature-gated in openmls 0.5; extract() is public.
@@ -291,13 +291,13 @@ impl GroupMember {
///
/// Returns [`CoreError::Mls`] if the message is malformed, fails
/// authentication, or the group state is inconsistent.
pub fn receive_message(&mut self, bytes: &[u8]) -> Result<Option<Vec<u8>>, CoreError> {
pub fn receive_message(&mut self, mut bytes: &[u8]) -> Result<Option<Vec<u8>>, CoreError> {
let group = self
.group
.as_mut()
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes.as_ref())
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes)
.map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;
// into_protocol_message() is feature-gated; extract() + manual construction is not.

View File

@@ -236,10 +236,7 @@ pub fn hybrid_encrypt(
}
/// Decrypt a hybrid envelope using the recipient's private key.
pub fn hybrid_decrypt(
keypair: &HybridKeypair,
envelope: &[u8],
) -> Result<Vec<u8>, HybridKemError> {
pub fn hybrid_decrypt(keypair: &HybridKeypair, envelope: &[u8]) -> Result<Vec<u8>, HybridKemError> {
if envelope.len() < HEADER_LEN + 16 {
// 16 = minimum AEAD tag
return Err(HybridKemError::TooShort(envelope.len()));
@@ -274,8 +271,8 @@ pub fn hybrid_decrypt(
// 2. ML-KEM decapsulation — convert bytes to the ciphertext array type
// that `DecapsulationKey::decapsulate` expects.
let mlkem_ct_arr = Array::try_from(mlkem_ct_bytes)
.map_err(|_| HybridKemError::MlKemDecapsFailed)?;
let mlkem_ct_arr =
Array::try_from(mlkem_ct_bytes).map_err(|_| HybridKemError::MlKemDecapsFailed)?;
let mlkem_ss = keypair
.mlkem_dk
.decapsulate(&mlkem_ct_arr)
@@ -419,10 +416,7 @@ mod tests {
let restored = HybridKeypair::from_bytes(&bytes).unwrap();
assert_eq!(kp.x25519_pk.to_bytes(), restored.x25519_pk.to_bytes());
assert_eq!(
kp.public_key().mlkem_ek,
restored.public_key().mlkem_ek
);
assert_eq!(kp.public_key().mlkem_ek, restored.public_key().mlkem_ek);
// Verify restored keypair can decrypt
let pk = kp.public_key();

View File

@@ -18,15 +18,44 @@
use openmls::prelude::{
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage,
TlsSerializeTrait,
KeyPackageIn, TlsDeserializeTrait, TlsSerializeTrait,
};
use openmls_rust_crypto::OpenMlsRustCrypto;
use sha2::{Digest, Sha256};
use crate::{error::CoreError, identity::IdentityKeypair};
/// The MLS ciphersuite used throughout quicnprotochat.
const CIPHERSUITE: Ciphersuite = Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
/// The MLS ciphersuite used throughout quicnprotochat (RFC 9420 §17.1).
pub const ALLOWED_CIPHERSUITE: Ciphersuite =
Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
/// Wire value of the allowed ciphersuite (KeyPackage TLS encoding: version 2B, ciphersuite 2B).
const ALLOWED_CIPHERSUITE_WIRE: u16 = 0x0001;
const CIPHERSUITE: Ciphersuite = ALLOWED_CIPHERSUITE;
/// Validates that the KeyPackage bytes use an allowed ciphersuite (Phase 2: ciphersuite allowlist).
///
/// Parses the TLS-encoded KeyPackage and rejects if the ciphersuite is not
/// `MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519`. Does not verify signatures;
/// the server uses this only to enforce policy before storing.
pub fn validate_keypackage_ciphersuite(bytes: &[u8]) -> Result<(), CoreError> {
    // The TLS encoding begins with the protocol version (2 bytes) followed by
    // the ciphersuite (2 bytes), so 4 bytes is the minimum needed to read it.
    if bytes.len() < 4 {
        return Err(CoreError::Mls("KeyPackage too short for version+ciphersuite".into()));
    }
    // Ciphersuite is a big-endian u16 at offset 2 (immediately after the version).
    let cs_wire = u16::from_be_bytes([bytes[2], bytes[3]]);
    if cs_wire != ALLOWED_CIPHERSUITE_WIRE {
        return Err(CoreError::Mls(format!(
            "KeyPackage ciphersuite {:#06x} not in allowlist (only {:#06x} allowed)",
            cs_wire, ALLOWED_CIPHERSUITE_WIRE
        )));
    }
    // Optionally confirm full parse so we don't accept garbage that happens to have 0x0001 at offset 2.
    // NOTE(review): trailing bytes after the parsed KeyPackage are not rejected
    // here — confirm whether strict length checking is wanted at this layer.
    let mut cursor = bytes;
    let _kp = KeyPackageIn::tls_deserialize(&mut cursor)
        .map_err(|e| CoreError::Mls(format!("KeyPackage parse: {e:?}")))?;
    Ok(())
}
/// Generate a fresh MLS KeyPackage for `identity` and serialise it.
///

View File

@@ -25,9 +25,9 @@ pub mod opaque_auth;
pub use error::CoreError;
pub use group::GroupMember;
pub use hybrid_kem::{
hybrid_decrypt, hybrid_encrypt, HybridKeypair, HybridKeypairBytes, HybridKemError,
hybrid_decrypt, hybrid_encrypt, HybridKemError, HybridKeypair, HybridKeypairBytes,
HybridPublicKey,
};
pub use identity::IdentityKeypair;
pub use keypackage::generate_key_package;
pub use keypackage::{generate_key_package, validate_keypackage_ciphersuite};
pub use keystore::DiskKeyStore;

View File

@@ -14,9 +14,7 @@ pub struct OpaqueSuite;
impl CipherSuite for OpaqueSuite {
type OprfCs = opaque_ke::Ristretto255;
type KeyExchange = opaque_ke::key_exchange::tripledh::TripleDh<
opaque_ke::Ristretto255,
sha2::Sha512,
>;
type KeyExchange =
opaque_ke::key_exchange::tripledh::TripleDh<opaque_ke::Ristretto255, sha2::Sha512>;
type Ksf = argon2::Argon2<'static>;
}

View File

@@ -0,0 +1,12 @@
[package]
name = "quicnprotochat-p2p"
version = "0.1.0"
edition = "2021"
description = "P2P transport layer for quicnprotochat using iroh."
license = "MIT"
[dependencies]
iroh = "0.96"
tokio = { workspace = true }
tracing = { workspace = true }
anyhow = { workspace = true }

View File

@@ -0,0 +1,189 @@
//! P2P transport layer for quicnprotochat using iroh.
//!
//! Provides direct peer-to-peer QUIC connections with NAT traversal via iroh
//! relay servers. When both peers are online, messages bypass the central
//! server entirely.
//!
//! # Architecture
//!
//! ```text
//! Client A ── iroh direct (QUIC) ── Client B (preferred: low latency)
//! │ │
//! └── QUIC/TLS ── Server ── QUIC/TLS ┘ (fallback: store-and-forward)
//! ```
use iroh::{Endpoint, EndpointAddr, PublicKey, SecretKey};
/// ALPN protocol identifier for quicnprotochat P2P messaging.
const P2P_ALPN: &[u8] = b"quicnprotochat/p2p/1";
/// A P2P node backed by an iroh endpoint.
///
/// Manages direct QUIC connections to peers with automatic NAT traversal.
pub struct P2pNode {
    /// Underlying iroh QUIC endpoint; holds the node's keypair and sockets.
    endpoint: Endpoint,
}
/// Received P2P message with sender information.
pub struct P2pMessage {
    /// Node id (public key) of the peer that opened the connection.
    pub sender: PublicKey,
    /// Message bytes with the 4-byte length-prefix framing already stripped.
    pub payload: Vec<u8>,
}
impl P2pNode {
    /// Start a new P2P node.
    ///
    /// Generates a fresh identity or reuses a provided secret key.
    pub async fn start(secret_key: Option<SecretKey>) -> anyhow::Result<Self> {
        let mut builder = Endpoint::builder();
        if let Some(sk) = secret_key {
            builder = builder.secret_key(sk);
        }
        // Advertise our ALPN so incoming connections negotiating it are accepted.
        builder = builder.alpns(vec![P2P_ALPN.to_vec()]);
        let endpoint = builder.bind().await?;
        tracing::info!(
            node_id = %endpoint.id().fmt_short(),
            "P2P node started"
        );
        Ok(Self { endpoint })
    }

    /// This node's public key (used as node ID for peer discovery).
    pub fn node_id(&self) -> PublicKey {
        self.endpoint.id()
    }

    /// This node's secret key (for persistence across restarts).
    pub fn secret_key(&self) -> SecretKey {
        self.endpoint.secret_key().clone()
    }

    /// Get the node's network address information for publishing to discovery.
    pub fn endpoint_addr(&self) -> EndpointAddr {
        self.endpoint.addr()
    }

    /// Send a payload directly to a peer via P2P QUIC.
    ///
    /// Opens a fresh connection and one unidirectional stream per call; the
    /// payload is framed as a 4-byte big-endian length followed by the bytes.
    pub async fn send(&self, peer: impl Into<EndpointAddr>, payload: &[u8]) -> anyhow::Result<()> {
        let peer = peer.into();
        let conn = self.endpoint.connect(peer, P2P_ALPN).await?;
        let mut send = conn.open_uni().await.map_err(|e| anyhow::anyhow!("{e}"))?;
        // Simple framing: 4-byte length prefix + payload.
        let len = (payload.len() as u32).to_be_bytes();
        send.write_all(&len)
            .await
            .map_err(|e| anyhow::anyhow!("{e}"))?;
        send.write_all(payload)
            .await
            .map_err(|e| anyhow::anyhow!("{e}"))?;
        send.finish().map_err(|e| anyhow::anyhow!("{e}"))?;
        // Wait until the peer has consumed the stream before dropping.
        send.stopped().await.map_err(|e| anyhow::anyhow!("{e}"))?;
        tracing::debug!(
            peer = %conn.remote_id().fmt_short(),
            bytes = payload.len(),
            "P2P message sent"
        );
        Ok(())
    }

    /// Accept a single incoming P2P message.
    ///
    /// Blocks until a peer connects and sends data.
    pub async fn recv(&self) -> anyhow::Result<P2pMessage> {
        let incoming = self
            .endpoint
            .accept()
            .await
            .ok_or_else(|| anyhow::anyhow!("no more incoming connections"))?;
        let conn = incoming.await.map_err(|e| anyhow::anyhow!("{e}"))?;
        let sender = conn.remote_id();
        let mut recv = conn
            .accept_uni()
            .await
            .map_err(|e| anyhow::anyhow!("{e}"))?;
        // Read length-prefixed payload.
        let mut len_buf = [0u8; 4];
        recv.read_exact(&mut len_buf)
            .await
            .map_err(|e| anyhow::anyhow!("{e}"))?;
        let len = u32::from_be_bytes(len_buf) as usize;
        // Bound allocation from the untrusted length prefix; the 5 MB cap
        // mirrors the server's per-message payload limit.
        if len > 5 * 1024 * 1024 {
            anyhow::bail!("P2P payload too large: {len} bytes");
        }
        let mut payload = vec![0u8; len];
        recv.read_exact(&mut payload)
            .await
            .map_err(|e| anyhow::anyhow!("{e}"))?;
        tracing::debug!(
            peer = %sender.fmt_short(),
            bytes = len,
            "P2P message received"
        );
        Ok(P2pMessage { sender, payload })
    }

    /// Gracefully shut down the P2P node.
    pub async fn close(self) {
        self.endpoint.close().await;
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use iroh::RelayMode;

    /// Create a local-only P2P node with relays disabled (for testing).
    async fn local_node() -> P2pNode {
        let endpoint = Endpoint::builder()
            .alpns(vec![P2P_ALPN.to_vec()])
            .relay_mode(RelayMode::Disabled)
            .bind()
            .await
            .unwrap();
        P2pNode { endpoint }
    }

    /// Round trip: Alice sends one framed payload to Bob over a direct local
    /// QUIC connection; Bob asserts both the payload and the sender node id.
    #[tokio::test]
    async fn p2p_round_trip() {
        let alice = local_node().await;
        let bob = local_node().await;
        let bob_addr = bob.endpoint_addr();
        let alice_id = alice.node_id();
        let payload = b"hello from alice via P2P";
        // Spawn Bob's receiver.
        let bob_handle = tokio::spawn(async move {
            let msg = bob.recv().await.unwrap();
            assert_eq!(msg.payload, payload.to_vec());
            assert_eq!(msg.sender, alice_id);
        });
        // Give Bob a moment to start accepting.
        // NOTE(review): sleep-based synchronisation is best-effort and may
        // flake on a slow machine — consider an explicit readiness signal.
        tokio::time::sleep(std::time::Duration::from_millis(200)).await;
        alice.send(bob_addr, payload).await.unwrap();
        // Wait for Bob to finish reading before closing.
        bob_handle.await.unwrap();
        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
        alice.close().await;
    }
}

View File

@@ -1,5 +1,8 @@
//! Cap'n Proto schemas, generated types, and serialisation helpers for quicnprotochat.
//!
//! Generated Cap'n Proto code emits unnecessary parentheses; allow per coding standards.
#![allow(unused_parens)]
//! # Design constraints
//!
//! This crate is intentionally restricted:

View File

@@ -23,6 +23,7 @@ pub const E017_SESSION_EXPIRED: &str = "E017";
pub const E018_USER_EXISTS: &str = "E018";
pub const E019_NO_PENDING_LOGIN: &str = "E019";
pub const E020_BAD_PARAMS: &str = "E020";
pub const E021_CIPHERSUITE_NOT_ALLOWED: &str = "E021";
/// Build a `capnp::Error::failed()` with the structured code prefix.
pub fn coded_error(code: &str, msg: impl std::fmt::Display) -> capnp::Error {

View File

@@ -13,10 +13,15 @@
//! The entire RPC stack lives on a `tokio::task::LocalSet` spawned per
//! connection.
use std::{fs, net::SocketAddr, path::{Path, PathBuf}, sync::Arc, time::Duration};
use std::{
fs,
net::SocketAddr,
path::{Path, PathBuf},
sync::Arc,
time::Duration,
};
use anyhow::Context;
use serde::Deserialize;
use capnp::capability::Promise;
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
use clap::Parser;
@@ -33,6 +38,7 @@ use rand::rngs::OsRng;
use rcgen::generate_simple_self_signed;
use rustls::pki_types::{CertificateDer, PrivateKeyDer};
use rustls::version::TLS13;
use serde::Deserialize;
use sha2::{Digest, Sha256};
use subtle::ConstantTimeEq;
use tokio::sync::Notify;
@@ -44,11 +50,11 @@ mod sql_store;
mod storage;
use error_codes::*;
use sql_store::SqlStore;
use storage::{FileBackedStore, Store, StorageError};
use storage::{FileBackedStore, StorageError, Store};
const MAX_PAYLOAD_BYTES: usize = 5 * 1024 * 1024; // 5 MB cap per message
const MAX_KEYPACKAGE_BYTES: usize = 1 * 1024 * 1024; // 1 MB cap per KeyPackage
const CURRENT_WIRE_VERSION: u16 = 1; // legacy disabled; current wire version only
const CURRENT_WIRE_VERSION: u16 = 1;
const DEFAULT_LISTEN: &str = "0.0.0.0:7000";
const DEFAULT_DATA_DIR: &str = "data";
@@ -71,7 +77,9 @@ struct AuthConfig {
impl AuthConfig {
fn new(required_token: Option<String>) -> Self {
let required_token = required_token.filter(|s| !s.is_empty()).map(|s| s.into_bytes());
let required_token = required_token
.filter(|s| !s.is_empty())
.map(|s| s.into_bytes());
Self { required_token }
}
}
@@ -110,34 +118,42 @@ fn load_config(path: Option<&Path>) -> anyhow::Result<FileConfig> {
return Ok(FileConfig::default());
}
let contents = fs::read_to_string(&path)
.with_context(|| format!("read config file {path:?}"))?;
let cfg: FileConfig = toml::from_str(&contents)
.with_context(|| format!("parse config file {path:?}"))?;
let contents =
fs::read_to_string(&path).with_context(|| format!("read config file {path:?}"))?;
let cfg: FileConfig =
toml::from_str(&contents).with_context(|| format!("parse config file {path:?}"))?;
Ok(cfg)
}
fn merge_config(args: &Args, file: &FileConfig) -> EffectiveConfig {
let listen = if args.listen == DEFAULT_LISTEN {
file.listen.clone().unwrap_or_else(|| DEFAULT_LISTEN.to_string())
file.listen
.clone()
.unwrap_or_else(|| DEFAULT_LISTEN.to_string())
} else {
args.listen.clone()
};
let data_dir = if args.data_dir == DEFAULT_DATA_DIR {
file.data_dir.clone().unwrap_or_else(|| DEFAULT_DATA_DIR.to_string())
file.data_dir
.clone()
.unwrap_or_else(|| DEFAULT_DATA_DIR.to_string())
} else {
args.data_dir.clone()
};
let tls_cert = if args.tls_cert == PathBuf::from(DEFAULT_TLS_CERT) {
file.tls_cert.clone().unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_CERT))
file.tls_cert
.clone()
.unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_CERT))
} else {
args.tls_cert.clone()
};
let tls_key = if args.tls_key == PathBuf::from(DEFAULT_TLS_KEY) {
file.tls_key.clone().unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_KEY))
file.tls_key
.clone()
.unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_KEY))
} else {
args.tls_key.clone()
};
@@ -231,7 +247,11 @@ struct Args {
// ── Session management ──────────────────────────────────────────────────────
struct SessionInfo {
/// For future audit logging.
#[allow(dead_code)]
username: String,
/// For future audit logging.
#[allow(dead_code)]
identity_key: Vec<u8>,
#[allow(dead_code)]
created_at: u64,
@@ -289,9 +309,12 @@ impl node_service::Server for NodeServiceImpl {
params: node_service::UploadKeyPackageParams,
mut results: node_service::UploadKeyPackageResults,
) -> Promise<(), capnp::Error> {
let params = params
.get()
.map_err(|e| coded_error(E020_BAD_PARAMS, format!("upload_key_package: bad params: {e}")));
let params = params.get().map_err(|e| {
coded_error(
E020_BAD_PARAMS,
format!("upload_key_package: bad params: {e}"),
)
});
let (identity_key, package) = match params {
Ok(p) => {
@@ -314,7 +337,10 @@ impl node_service::Server for NodeServiceImpl {
if identity_key.len() != 32 {
return Promise::err(coded_error(
E004_IDENTITY_KEY_LENGTH,
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
format!(
"identityKey must be exactly 32 bytes, got {}",
identity_key.len()
),
));
}
if package.is_empty() {
@@ -327,6 +353,14 @@ impl node_service::Server for NodeServiceImpl {
));
}
// Phase 2: ciphersuite allowlist — reject KeyPackages not using the allowed MLS ciphersuite.
if let Err(e) = quicnprotochat_core::validate_keypackage_ciphersuite(&package) {
return Promise::err(coded_error(
E021_CIPHERSUITE_NOT_ALLOWED,
format!("KeyPackage ciphersuite not allowed: {e}"),
));
}
let fingerprint: Vec<u8> = Sha256::digest(&package).to_vec();
if let Err(e) = self
.store
@@ -371,7 +405,10 @@ impl node_service::Server for NodeServiceImpl {
if identity_key.len() != 32 {
return Promise::err(coded_error(
E004_IDENTITY_KEY_LENGTH,
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
format!(
"identityKey must be exactly 32 bytes, got {}",
identity_key.len()
),
));
}
@@ -424,15 +461,19 @@ impl node_service::Server for NodeServiceImpl {
};
let channel_id = p.get_channel_id().unwrap_or_default().to_vec();
let version = p.get_version();
let auth_token = match validate_auth_return_token(&self.auth_cfg, &self.sessions, p.get_auth()) {
Ok(t) => t,
Err(e) => return Promise::err(e),
};
let auth_token =
match validate_auth_return_token(&self.auth_cfg, &self.sessions, p.get_auth()) {
Ok(t) => t,
Err(e) => return Promise::err(e),
};
if recipient_key.len() != 32 {
return Promise::err(coded_error(
E004_IDENTITY_KEY_LENGTH,
format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
format!(
"recipientKey must be exactly 32 bytes, got {}",
recipient_key.len()
),
));
}
if payload.is_empty() {
@@ -447,7 +488,10 @@ impl node_service::Server for NodeServiceImpl {
if version != CURRENT_WIRE_VERSION {
return Promise::err(coded_error(
E012_WIRE_VERSION,
format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
format!(
"unsupported wire version {} (expected {CURRENT_WIRE_VERSION})",
version
),
));
}
@@ -510,11 +554,7 @@ impl node_service::Server for NodeServiceImpl {
.ok()
.map(|p| p.get_version())
.unwrap_or(CURRENT_WIRE_VERSION);
let limit = params
.get()
.ok()
.map(|p| p.get_limit())
.unwrap_or(0);
let limit = params.get().ok().map(|p| p.get_limit()).unwrap_or(0);
if let Err(e) = params
.get()
.ok()
@@ -527,23 +567,37 @@ impl node_service::Server for NodeServiceImpl {
if recipient_key.len() != 32 {
return Promise::err(coded_error(
E004_IDENTITY_KEY_LENGTH,
format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
format!(
"recipientKey must be exactly 32 bytes, got {}",
recipient_key.len()
),
));
}
if version != CURRENT_WIRE_VERSION {
return Promise::err(coded_error(
E012_WIRE_VERSION,
format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
format!(
"unsupported wire version {} (expected {CURRENT_WIRE_VERSION})",
version
),
));
}
let messages = if limit > 0 {
match self.store.fetch_limited(&recipient_key, &channel_id, limit as usize).map_err(storage_err) {
match self
.store
.fetch_limited(&recipient_key, &channel_id, limit as usize)
.map_err(storage_err)
{
Ok(m) => m,
Err(e) => return Promise::err(e),
}
} else {
match self.store.fetch(&recipient_key, &channel_id).map_err(storage_err) {
match self
.store
.fetch(&recipient_key, &channel_id)
.map_err(storage_err)
{
Ok(m) => m,
Err(e) => return Promise::err(e),
}
@@ -588,13 +642,19 @@ impl node_service::Server for NodeServiceImpl {
if recipient_key.len() != 32 {
return Promise::err(coded_error(
E004_IDENTITY_KEY_LENGTH,
format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
format!(
"recipientKey must be exactly 32 bytes, got {}",
recipient_key.len()
),
));
}
if version != CURRENT_WIRE_VERSION {
return Promise::err(coded_error(
E012_WIRE_VERSION,
format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
format!(
"unsupported wire version {} (expected {CURRENT_WIRE_VERSION})",
version
),
));
}
@@ -602,7 +662,11 @@ impl node_service::Server for NodeServiceImpl {
let waiters = self.waiters.clone();
Promise::from_future(async move {
let fetch_fn = |s: &Arc<dyn Store>, rk: &[u8], ch: &[u8], lim: u32| -> Result<Vec<Vec<u8>>, capnp::Error> {
let fetch_fn = |s: &Arc<dyn Store>,
rk: &[u8],
ch: &[u8],
lim: u32|
-> Result<Vec<Vec<u8>>, capnp::Error> {
if lim > 0 {
s.fetch_limited(rk, ch, lim as usize).map_err(storage_err)
} else {
@@ -664,7 +728,10 @@ impl node_service::Server for NodeServiceImpl {
if identity_key.len() != 32 {
return Promise::err(coded_error(
E004_IDENTITY_KEY_LENGTH,
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
format!(
"identityKey must be exactly 32 bytes, got {}",
identity_key.len()
),
));
}
if hybrid_pk.is_empty() {
@@ -713,7 +780,10 @@ impl node_service::Server for NodeServiceImpl {
if identity_key.len() != 32 {
return Promise::err(coded_error(
E004_IDENTITY_KEY_LENGTH,
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
format!(
"identityKey must be exactly 32 bytes, got {}",
identity_key.len()
),
));
}
@@ -767,7 +837,10 @@ impl node_service::Server for NodeServiceImpl {
};
if username.is_empty() {
return Promise::err(coded_error(E011_USERNAME_EMPTY, "username must not be empty"));
return Promise::err(coded_error(
E011_USERNAME_EMPTY,
"username must not be empty",
));
}
let reg_request = match RegistrationRequest::<OpaqueSuite>::deserialize(&request_bytes) {
@@ -821,7 +894,10 @@ impl node_service::Server for NodeServiceImpl {
let identity_key = p.get_identity_key().unwrap_or_default().to_vec();
if username.is_empty() {
return Promise::err(coded_error(E011_USERNAME_EMPTY, "username must not be empty"));
return Promise::err(coded_error(
E011_USERNAME_EMPTY,
"username must not be empty",
));
}
// Fix 5: Registration collision check
@@ -894,19 +970,22 @@ impl node_service::Server for NodeServiceImpl {
};
if username.is_empty() {
return Promise::err(coded_error(E011_USERNAME_EMPTY, "username must not be empty"));
return Promise::err(coded_error(
E011_USERNAME_EMPTY,
"username must not be empty",
));
}
let credential_request =
match CredentialRequest::<OpaqueSuite>::deserialize(&request_bytes) {
Ok(r) => r,
Err(e) => {
return Promise::err(coded_error(
E010_OPAQUE_ERROR,
format!("invalid credential request: {e}"),
))
}
};
let credential_request = match CredentialRequest::<OpaqueSuite>::deserialize(&request_bytes)
{
Ok(r) => r,
Err(e) => {
return Promise::err(coded_error(
E010_OPAQUE_ERROR,
format!("invalid credential request: {e}"),
))
}
};
// Load user's OPAQUE password file (if registered).
let password_file = match self.store.get_user_record(&username) {
@@ -978,7 +1057,10 @@ impl node_service::Server for NodeServiceImpl {
let identity_key = p.get_identity_key().unwrap_or_default().to_vec();
if username.is_empty() {
return Promise::err(coded_error(E011_USERNAME_EMPTY, "username must not be empty"));
return Promise::err(coded_error(
E011_USERNAME_EMPTY,
"username must not be empty",
));
}
// Retrieve the pending ServerLogin state.
@@ -1081,11 +1163,18 @@ impl node_service::Server for NodeServiceImpl {
if identity_key.len() != 32 {
return Promise::err(coded_error(
E004_IDENTITY_KEY_LENGTH,
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
format!(
"identityKey must be exactly 32 bytes, got {}",
identity_key.len()
),
));
}
if let Err(e) = self.store.publish_endpoint(&identity_key, node_addr).map_err(storage_err) {
if let Err(e) = self
.store
.publish_endpoint(&identity_key, node_addr)
.map_err(storage_err)
{
return Promise::err(e);
}
@@ -1113,11 +1202,18 @@ impl node_service::Server for NodeServiceImpl {
if identity_key.len() != 32 {
return Promise::err(coded_error(
E004_IDENTITY_KEY_LENGTH,
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
format!(
"identityKey must be exactly 32 bytes, got {}",
identity_key.len()
),
));
}
match self.store.resolve_endpoint(&identity_key).map_err(storage_err) {
match self
.store
.resolve_endpoint(&identity_key)
.map_err(storage_err)
{
Ok(Some(addr)) => {
results.get().set_node_addr(&addr);
}
@@ -1148,9 +1244,10 @@ fn check_rate_limit(
token: &[u8],
) -> Result<(), capnp::Error> {
let now = current_timestamp();
let mut entry = rate_limits
.entry(token.to_vec())
.or_insert(RateEntry { count: 0, window_start: now });
let mut entry = rate_limits.entry(token.to_vec()).or_insert(RateEntry {
count: 0,
window_start: now,
});
if now - entry.window_start >= RATE_LIMIT_WINDOW_SECS {
entry.count = 1;
@@ -1222,17 +1319,14 @@ fn validate_auth_return_token(
// Expired — will be cleaned up by background task.
drop(session);
sessions.remove(&token);
return Err(coded_error(E017_SESSION_EXPIRED, "session token has expired"));
return Err(coded_error(
E017_SESSION_EXPIRED,
"session token has expired",
));
}
// If a static token is configured but neither matched, reject.
if cfg.required_token.is_some() {
return Err(coded_error(E003_INVALID_TOKEN, "invalid accessToken"));
}
// No static token configured and no session match — accept any non-empty
// token for backward compatibility (dev mode).
Ok(token)
// Require either static token or valid session; no legacy accept-any-token.
Err(coded_error(E003_INVALID_TOKEN, "invalid accessToken"))
}
// ── Entry point ───────────────────────────────────────────────────────────────
@@ -1250,12 +1344,19 @@ async fn main() -> anyhow::Result<()> {
let file_cfg = load_config(args.config.as_deref())?;
let effective = merge_config(&args, &file_cfg);
let production = std::env::var("QUICNPROTOCHAT_PRODUCTION")
.map(|v| matches!(v.to_lowercase().as_str(), "1" | "true" | "yes"))
.unwrap_or(false);
if production {
validate_production_config(&effective)?;
}
let listen: SocketAddr = effective
.listen
.parse()
.context("--listen must be host:port")?;
let server_config = build_server_config(&effective.tls_cert, &effective.tls_key)
let server_config = build_server_config(&effective.tls_cert, &effective.tls_key, production)
.context("failed to build TLS/QUIC server config")?;
// Shared storage — persisted to disk for restart safety.
@@ -1322,11 +1423,14 @@ async fn main() -> anyhow::Result<()> {
pending_logins.retain(|_, pl| now - pl.created_at < PENDING_LOGIN_TTL_SECS);
// Expire stale rate limit entries (Fix 6)
rate_limits.retain(|_, entry| now - entry.window_start < RATE_LIMIT_WINDOW_SECS * 2);
rate_limits
.retain(|_, entry| now - entry.window_start < RATE_LIMIT_WINDOW_SECS * 2);
// GC expired messages (Fix 7)
match store.gc_expired_messages(MESSAGE_TTL_SECS) {
Ok(n) if n > 0 => tracing::debug!(expired = n, "garbage collected expired messages"),
Ok(n) if n > 0 => {
tracing::debug!(expired = n, "garbage collected expired messages")
}
Err(e) => tracing::warn!(error = %e, "message GC failed"),
_ => {}
}
@@ -1347,42 +1451,54 @@ async fn main() -> anyhow::Result<()> {
local
.run_until(async move {
loop {
let incoming = match endpoint.accept().await {
Some(i) => i,
None => break,
};
tokio::select! {
biased;
let connecting = match incoming.accept() {
Ok(c) => c,
Err(e) => {
tracing::warn!(error = %e, "failed to accept incoming connection");
continue;
}
};
incoming = endpoint.accept() => {
let incoming = match incoming {
Some(i) => i,
None => break,
};
let store = Arc::clone(&store);
let waiters = Arc::clone(&waiters);
let auth_cfg = Arc::clone(&auth_cfg);
let opaque_setup = Arc::clone(&opaque_setup);
let pending_logins = Arc::clone(&pending_logins);
let sessions = Arc::clone(&sessions);
let rate_limits = Arc::clone(&rate_limits);
tokio::task::spawn_local(async move {
if let Err(e) = handle_node_connection(
connecting,
store,
waiters,
auth_cfg,
opaque_setup,
pending_logins,
sessions,
rate_limits,
)
.await
{
tracing::warn!(error = %e, "connection error");
let connecting = match incoming.accept() {
Ok(c) => c,
Err(e) => {
tracing::warn!(error = %e, "failed to accept incoming connection");
continue;
}
};
let store = Arc::clone(&store);
let waiters = Arc::clone(&waiters);
let auth_cfg = Arc::clone(&auth_cfg);
let opaque_setup = Arc::clone(&opaque_setup);
let pending_logins = Arc::clone(&pending_logins);
let sessions = Arc::clone(&sessions);
let rate_limits = Arc::clone(&rate_limits);
tokio::task::spawn_local(async move {
if let Err(e) = handle_node_connection(
connecting,
store,
waiters,
auth_cfg,
opaque_setup,
pending_logins,
sessions,
rate_limits,
)
.await
{
tracing::warn!(error = %e, "connection error");
}
});
}
});
_ = tokio::signal::ctrl_c() => {
tracing::info!("shutdown signal received, draining QUIC connections");
endpoint.close(0u32.into(), b"server shutdown");
break;
}
}
}
Ok::<(), anyhow::Error>(())
@@ -1393,6 +1509,7 @@ async fn main() -> anyhow::Result<()> {
// ── Per-connection handlers ───────────────────────────────────────────────────
/// Handle one NodeService connection.
#[allow(clippy::too_many_arguments)]
async fn handle_node_connection(
connecting: quinn::Connecting,
store: Arc<dyn Store>,
@@ -1438,9 +1555,45 @@ fn fmt_hex(bytes: &[u8]) -> String {
format!("{hex}")
}
fn validate_production_config(effective: &EffectiveConfig) -> anyhow::Result<()> {
let token = effective
.auth_token
.as_deref()
.filter(|s| !s.is_empty())
.ok_or_else(|| {
anyhow::anyhow!("production requires QUICNPROTOCHAT_AUTH_TOKEN (non-empty)")
})?;
if token == "devtoken" {
anyhow::bail!(
"production forbids auth_token 'devtoken'; set a strong QUICNPROTOCHAT_AUTH_TOKEN"
);
}
if effective.store_backend == "sql" && effective.db_key.is_empty() {
anyhow::bail!("production with store_backend=sql requires non-empty QUICNPROTOCHAT_DB_KEY");
}
if !effective.tls_cert.exists() || !effective.tls_key.exists() {
anyhow::bail!(
"production requires existing TLS cert and key (no auto-generation); provide QUICNPROTOCHAT_TLS_CERT and QUICNPROTOCHAT_TLS_KEY"
);
}
Ok(())
}
/// Ensure a self-signed certificate exists on disk and return a QUIC server config.
fn build_server_config(cert_path: &PathBuf, key_path: &PathBuf) -> anyhow::Result<ServerConfig> {
/// When `production` is true, cert and key must already exist (no auto-generation).
fn build_server_config(
cert_path: &PathBuf,
key_path: &PathBuf,
production: bool,
) -> anyhow::Result<ServerConfig> {
if !cert_path.exists() || !key_path.exists() {
if production {
anyhow::bail!(
"TLS cert or key missing at {:?} / {:?}; production mode forbids auto-generation",
cert_path,
key_path
);
}
generate_self_signed_cert(cert_path, key_path)?;
}

View File

@@ -13,6 +13,12 @@ pub struct SqlStore {
}
impl SqlStore {
fn lock_conn(&self) -> Result<std::sync::MutexGuard<'_, Connection>, StorageError> {
self.conn
.lock()
.map_err(|e| StorageError::Db(format!("lock poisoned: {e}")))
}
pub fn open(path: impl AsRef<Path>, key: &str) -> Result<Self, StorageError> {
let conn = Connection::open(path).map_err(|e| StorageError::Db(e.to_string()))?;
@@ -36,7 +42,7 @@ impl SqlStore {
}
fn migrate(&self) -> Result<(), StorageError> {
let conn = self.conn.lock().unwrap();
let conn = self.lock_conn()?;
conn.execute_batch(
"CREATE TABLE IF NOT EXISTS key_packages (
id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -97,7 +103,7 @@ impl Store for SqlStore {
identity_key: &[u8],
package: Vec<u8>,
) -> Result<(), StorageError> {
let conn = self.conn.lock().unwrap();
let conn = self.lock_conn()?;
conn.execute(
"INSERT INTO key_packages (identity_key, package_data) VALUES (?1, ?2)",
params![identity_key, package],
@@ -107,7 +113,7 @@ impl Store for SqlStore {
}
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
let conn = self.conn.lock().unwrap();
let conn = self.lock_conn()?;
let mut stmt = conn
.prepare(
@@ -141,7 +147,7 @@ impl Store for SqlStore {
channel_id: &[u8],
payload: Vec<u8>,
) -> Result<(), StorageError> {
let conn = self.conn.lock().unwrap();
let conn = self.lock_conn()?;
conn.execute(
"INSERT INTO deliveries (recipient_key, channel_id, payload) VALUES (?1, ?2, ?3)",
params![recipient_key, channel_id, payload],
@@ -150,12 +156,8 @@ impl Store for SqlStore {
Ok(())
}
fn fetch(
&self,
recipient_key: &[u8],
channel_id: &[u8],
) -> Result<Vec<Vec<u8>>, StorageError> {
let conn = self.conn.lock().unwrap();
fn fetch(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<Vec<Vec<u8>>, StorageError> {
let conn = self.lock_conn()?;
let mut stmt = conn
.prepare(
@@ -177,8 +179,10 @@ impl Store for SqlStore {
let ids: Vec<i64> = rows.iter().map(|(id, _)| *id).collect();
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
let params: Vec<&dyn rusqlite::types::ToSql> =
ids.iter().map(|id| id as &dyn rusqlite::types::ToSql).collect();
let params: Vec<&dyn rusqlite::types::ToSql> = ids
.iter()
.map(|id| id as &dyn rusqlite::types::ToSql)
.collect();
conn.execute(&sql, params.as_slice())
.map_err(|e| StorageError::Db(e.to_string()))?;
}
@@ -192,7 +196,7 @@ impl Store for SqlStore {
channel_id: &[u8],
limit: usize,
) -> Result<Vec<Vec<u8>>, StorageError> {
let conn = self.conn.lock().unwrap();
let conn = self.lock_conn()?;
let mut stmt = conn
.prepare(
@@ -215,8 +219,10 @@ impl Store for SqlStore {
let ids: Vec<i64> = rows.iter().map(|(id, _)| *id).collect();
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
let params: Vec<&dyn rusqlite::types::ToSql> =
ids.iter().map(|id| id as &dyn rusqlite::types::ToSql).collect();
let params: Vec<&dyn rusqlite::types::ToSql> = ids
.iter()
.map(|id| id as &dyn rusqlite::types::ToSql)
.collect();
conn.execute(&sql, params.as_slice())
.map_err(|e| StorageError::Db(e.to_string()))?;
}
@@ -224,12 +230,8 @@ impl Store for SqlStore {
Ok(rows.into_iter().map(|(_, payload)| payload).collect())
}
fn queue_depth(
&self,
recipient_key: &[u8],
channel_id: &[u8],
) -> Result<usize, StorageError> {
let conn = self.conn.lock().unwrap();
fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError> {
let conn = self.lock_conn()?;
let count: i64 = conn
.query_row(
"SELECT COUNT(*) FROM deliveries WHERE recipient_key = ?1 AND channel_id = ?2",
@@ -241,7 +243,7 @@ impl Store for SqlStore {
}
fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError> {
let conn = self.conn.lock().unwrap();
let conn = self.lock_conn()?;
let cutoff = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
@@ -261,7 +263,7 @@ impl Store for SqlStore {
identity_key: &[u8],
hybrid_pk: Vec<u8>,
) -> Result<(), StorageError> {
let conn = self.conn.lock().unwrap();
let conn = self.lock_conn()?;
conn.execute(
"INSERT OR REPLACE INTO hybrid_keys (identity_key, hybrid_public_key) VALUES (?1, ?2)",
params![identity_key, hybrid_pk],
@@ -271,7 +273,7 @@ impl Store for SqlStore {
}
fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
let conn = self.conn.lock().unwrap();
let conn = self.lock_conn()?;
let mut stmt = conn
.prepare("SELECT hybrid_public_key FROM hybrid_keys WHERE identity_key = ?1")
.map_err(|e| StorageError::Db(e.to_string()))?;
@@ -282,7 +284,7 @@ impl Store for SqlStore {
}
fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError> {
let conn = self.conn.lock().unwrap();
let conn = self.lock_conn()?;
conn.execute(
"INSERT OR REPLACE INTO server_setup (id, setup_data) VALUES (1, ?1)",
params![setup],
@@ -292,7 +294,7 @@ impl Store for SqlStore {
}
fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError> {
let conn = self.conn.lock().unwrap();
let conn = self.lock_conn()?;
let mut stmt = conn
.prepare("SELECT setup_data FROM server_setup WHERE id = 1")
.map_err(|e| StorageError::Db(e.to_string()))?;
@@ -303,7 +305,7 @@ impl Store for SqlStore {
}
fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
let conn = self.conn.lock().unwrap();
let conn = self.lock_conn()?;
conn.execute(
"INSERT OR REPLACE INTO users (username, opaque_record) VALUES (?1, ?2)",
params![username, record],
@@ -313,7 +315,7 @@ impl Store for SqlStore {
}
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
let conn = self.conn.lock().unwrap();
let conn = self.lock_conn()?;
let mut stmt = conn
.prepare("SELECT opaque_record FROM users WHERE username = ?1")
.map_err(|e| StorageError::Db(e.to_string()))?;
@@ -324,7 +326,7 @@ impl Store for SqlStore {
}
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
let conn = self.conn.lock().unwrap();
let conn = self.lock_conn()?;
let exists: bool = conn
.query_row(
"SELECT EXISTS(SELECT 1 FROM users WHERE username = ?1)",
@@ -340,7 +342,7 @@ impl Store for SqlStore {
username: &str,
identity_key: Vec<u8>,
) -> Result<(), StorageError> {
let conn = self.conn.lock().unwrap();
let conn = self.lock_conn()?;
conn.execute(
"INSERT OR REPLACE INTO user_identity_keys (username, identity_key) VALUES (?1, ?2)",
params![username, identity_key],
@@ -350,7 +352,7 @@ impl Store for SqlStore {
}
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
let conn = self.conn.lock().unwrap();
let conn = self.lock_conn()?;
let mut stmt = conn
.prepare("SELECT identity_key FROM user_identity_keys WHERE username = ?1")
.map_err(|e| StorageError::Db(e.to_string()))?;
@@ -365,7 +367,7 @@ impl Store for SqlStore {
identity_key: &[u8],
node_addr: Vec<u8>,
) -> Result<(), StorageError> {
let conn = self.conn.lock().unwrap();
let conn = self.lock_conn()?;
conn.execute(
"INSERT OR REPLACE INTO endpoints (identity_key, node_addr) VALUES (?1, ?2)",
params![identity_key, node_addr],
@@ -375,7 +377,7 @@ impl Store for SqlStore {
}
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
let conn = self.conn.lock().unwrap();
let conn = self.lock_conn()?;
let mut stmt = conn
.prepare("SELECT node_addr FROM endpoints WHERE identity_key = ?1")
.map_err(|e| StorageError::Db(e.to_string()))?;
@@ -481,7 +483,9 @@ mod tests {
fn has_user_record_check() {
let store = open_in_memory();
assert!(!store.has_user_record("alice").unwrap());
store.store_user_record("alice", b"record".to_vec()).unwrap();
store
.store_user_record("alice", b"record".to_vec())
.unwrap();
assert!(store.has_user_record("alice").unwrap());
assert!(!store.has_user_record("bob").unwrap());
}
@@ -490,8 +494,13 @@ mod tests {
fn user_identity_key_round_trip() {
let store = open_in_memory();
assert!(store.get_user_identity_key("alice").unwrap().is_none());
store.store_user_identity_key("alice", vec![1u8; 32]).unwrap();
assert_eq!(store.get_user_identity_key("alice").unwrap(), Some(vec![1u8; 32]));
store
.store_user_identity_key("alice", vec![1u8; 32])
.unwrap();
assert_eq!(
store.get_user_identity_key("alice").unwrap(),
Some(vec![1u8; 32])
);
}
#[test]

View File

@@ -18,15 +18,17 @@ pub enum StorageError {
Db(String),
}
fn lock<T>(m: &Mutex<T>) -> Result<std::sync::MutexGuard<'_, T>, StorageError> {
m.lock()
.map_err(|e| StorageError::Io(format!("lock poisoned: {e}")))
}
// ── Store trait ──────────────────────────────────────────────────────────────
/// Abstraction over storage backends (file-backed, SQLCipher, etc.).
pub trait Store: Send + Sync {
fn upload_key_package(
&self,
identity_key: &[u8],
package: Vec<u8>,
) -> Result<(), StorageError>;
fn upload_key_package(&self, identity_key: &[u8], package: Vec<u8>)
-> Result<(), StorageError>;
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
@@ -37,11 +39,7 @@ pub trait Store: Send + Sync {
payload: Vec<u8>,
) -> Result<(), StorageError>;
fn fetch(
&self,
recipient_key: &[u8],
channel_id: &[u8],
) -> Result<Vec<Vec<u8>>, StorageError>;
fn fetch(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<Vec<Vec<u8>>, StorageError>;
/// Fetch up to `limit` messages without draining the entire queue (Fix 8).
fn fetch_limited(
@@ -52,11 +50,7 @@ pub trait Store: Send + Sync {
) -> Result<Vec<Vec<u8>>, StorageError>;
/// Return the number of queued messages for (recipient, channel) (Fix 7).
fn queue_depth(
&self,
recipient_key: &[u8],
channel_id: &[u8],
) -> Result<usize, StorageError>;
fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError>;
/// Delete messages older than `max_age_secs`. Returns count deleted (Fix 7).
fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError>;
@@ -95,11 +89,8 @@ pub trait Store: Send + Sync {
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;
/// Publish a P2P endpoint address for an identity key.
fn publish_endpoint(
&self,
identity_key: &[u8],
node_addr: Vec<u8>,
) -> Result<(), StorageError>;
fn publish_endpoint(&self, identity_key: &[u8], node_addr: Vec<u8>)
-> Result<(), StorageError>;
/// Resolve a peer's P2P endpoint address.
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
@@ -210,7 +201,9 @@ impl FileBackedStore {
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
}
fn load_delivery_map(path: &Path) -> Result<HashMap<ChannelKey, VecDeque<Vec<u8>>>, StorageError> {
fn load_delivery_map(
path: &Path,
) -> Result<HashMap<ChannelKey, VecDeque<Vec<u8>>>, StorageError> {
if !path.exists() {
return Ok(HashMap::new());
}
@@ -218,22 +211,9 @@ impl FileBackedStore {
if bytes.is_empty() {
return Ok(HashMap::new());
}
// Try v2 format (channel-aware). Fallback to legacy v1 for upgrade.
if let Ok(map) = bincode::deserialize::<QueueMapV2>(&bytes) {
return Ok(map.map);
}
let legacy: QueueMapV1 = bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)?;
let mut upgraded = HashMap::new();
for (recipient_key, queue) in legacy.map.into_iter() {
upgraded.insert(
ChannelKey {
channel_id: Vec::new(),
recipient_key,
},
queue,
);
}
Ok(upgraded)
bincode::deserialize::<QueueMapV2>(&bytes)
.map(|v| v.map)
.map_err(|_| StorageError::Io("deliveries file: v1 format no longer supported; delete or migrate".into()))
}
fn flush_delivery_map(
@@ -283,11 +263,7 @@ impl FileBackedStore {
bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)
}
fn flush_users(
&self,
path: &Path,
map: &HashMap<String, Vec<u8>>,
) -> Result<(), StorageError> {
fn flush_users(&self, path: &Path, map: &HashMap<String, Vec<u8>>) -> Result<(), StorageError> {
let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
if let Some(parent) = path.parent() {
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
@@ -314,7 +290,7 @@ impl Store for FileBackedStore {
identity_key: &[u8],
package: Vec<u8>,
) -> Result<(), StorageError> {
let mut map = self.key_packages.lock().unwrap();
let mut map = lock(&self.key_packages)?;
map.entry(identity_key.to_vec())
.or_default()
.push_back(package);
@@ -322,7 +298,7 @@ impl Store for FileBackedStore {
}
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
let mut map = self.key_packages.lock().unwrap();
let mut map = lock(&self.key_packages)?;
let package = map.get_mut(identity_key).and_then(|q| q.pop_front());
self.flush_kp_map(&self.kp_path, &*map)?;
Ok(package)
@@ -334,23 +310,17 @@ impl Store for FileBackedStore {
channel_id: &[u8],
payload: Vec<u8>,
) -> Result<(), StorageError> {
let mut map = self.deliveries.lock().unwrap();
let mut map = lock(&self.deliveries)?;
let key = ChannelKey {
channel_id: channel_id.to_vec(),
recipient_key: recipient_key.to_vec(),
};
map.entry(key)
.or_default()
.push_back(payload);
map.entry(key).or_default().push_back(payload);
self.flush_delivery_map(&self.ds_path, &*map)
}
fn fetch(
&self,
recipient_key: &[u8],
channel_id: &[u8],
) -> Result<Vec<Vec<u8>>, StorageError> {
let mut map = self.deliveries.lock().unwrap();
fn fetch(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<Vec<Vec<u8>>, StorageError> {
let mut map = lock(&self.deliveries)?;
let key = ChannelKey {
channel_id: channel_id.to_vec(),
recipient_key: recipient_key.to_vec(),
@@ -369,7 +339,7 @@ impl Store for FileBackedStore {
channel_id: &[u8],
limit: usize,
) -> Result<Vec<Vec<u8>>, StorageError> {
let mut map = self.deliveries.lock().unwrap();
let mut map = lock(&self.deliveries)?;
let key = ChannelKey {
channel_id: channel_id.to_vec(),
recipient_key: recipient_key.to_vec(),
@@ -385,12 +355,8 @@ impl Store for FileBackedStore {
Ok(messages)
}
fn queue_depth(
&self,
recipient_key: &[u8],
channel_id: &[u8],
) -> Result<usize, StorageError> {
let map = self.deliveries.lock().unwrap();
fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError> {
let map = lock(&self.deliveries)?;
let key = ChannelKey {
channel_id: channel_id.to_vec(),
recipient_key: recipient_key.to_vec(),
@@ -408,13 +374,13 @@ impl Store for FileBackedStore {
identity_key: &[u8],
hybrid_pk: Vec<u8>,
) -> Result<(), StorageError> {
let mut map = self.hybrid_keys.lock().unwrap();
let mut map = lock(&self.hybrid_keys)?;
map.insert(identity_key.to_vec(), hybrid_pk);
self.flush_hybrid_keys(&self.hk_path, &*map)
}
fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
let map = self.hybrid_keys.lock().unwrap();
let map = lock(&self.hybrid_keys)?;
Ok(map.get(identity_key).cloned())
}
@@ -437,18 +403,18 @@ impl Store for FileBackedStore {
}
fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
let mut map = self.users.lock().unwrap();
let mut map = lock(&self.users)?;
map.insert(username.to_string(), record);
self.flush_users(&self.users_path, &*map)
}
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
let map = self.users.lock().unwrap();
let map = lock(&self.users)?;
Ok(map.get(username).cloned())
}
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
let map = self.users.lock().unwrap();
let map = lock(&self.users)?;
Ok(map.contains_key(username))
}
@@ -457,13 +423,13 @@ impl Store for FileBackedStore {
username: &str,
identity_key: Vec<u8>,
) -> Result<(), StorageError> {
let mut map = self.identity_keys.lock().unwrap();
let mut map = lock(&self.identity_keys)?;
map.insert(username.to_string(), identity_key);
self.flush_map_string_bytes(&self.identity_keys_path, &*map)
}
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
let map = self.identity_keys.lock().unwrap();
let map = lock(&self.identity_keys)?;
Ok(map.get(username).cloned())
}
@@ -472,13 +438,13 @@ impl Store for FileBackedStore {
identity_key: &[u8],
node_addr: Vec<u8>,
) -> Result<(), StorageError> {
let mut map = self.endpoints.lock().unwrap();
let mut map = lock(&self.endpoints)?;
map.insert(identity_key.to_vec(), node_addr);
Ok(())
}
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
let map = self.endpoints.lock().unwrap();
let map = lock(&self.endpoints)?;
Ok(map.get(identity_key).cloned())
}
}