feat: interactive REPL with auto-setup, auto-join, encrypted local storage

REPL auto-setup (zero-friction startup):
- OnceLock → RwLock for CLIENT_AUTH to allow delayed init after OPAQUE login
- Extract opaque_register/opaque_login helpers from one-shot commands
- Token cache (.session file) with QPCE encryption when password provided
- Add --username/--password/--state-password to repl subcommand
- resolve_access_token: auto-register + login, cache token, prompt interactively
- rpassword for secure password input (no echo)

Interactive REPL (multi-conversation):
- SessionState: identity, hybrid key, ConversationStore, per-conversation GroupMembers
- ConversationStore: SQLite-backed conversations + messages with full CRUD
- Slash commands: /dm, /group, /invite, /join, /switch, /list, /members, /history, /whoami
- Background polling (1s interval) with auto-join from MLS Welcome messages
- pending_member pattern: persistent keystore for HPKE init key, replenish after join
- Self-DM handled as local-only notepad (no MLS/server channel)
- ANSI display module for colored prompts, incoming messages, status/error output

Username resolution:
- resolveIdentity RPC (@20 in node.capnp): look up username by identity key
- Server: resolve_identity_key in Store trait, FileBackedStore, SqlStore
- Client: resolve_identity in rpc.rs, used in auto-join for peer display names
- resolveUser: forward lookup (username → identity key), completing the bidirectional pair

Encrypted local storage (nothing in cleartext):
- ConversationStore uses SQLCipher when --state-password is provided
- Argon2id key derivation with per-database random salt (.convdb-salt, mode 0600)
- Transparent migration of existing unencrypted databases via sqlcipher_export
- Token cache encrypted with QPCE format (Argon2id + ChaCha20Poly1305)

Server changes:
- resolveIdentity + resolveUser RPC handlers with auth + validation
- Auth: sealed-sender identity binding on enqueue, channel member authorization
- Delivery: hybrid decrypt attempts, identity key validation on enqueue
- Config: --allow-sealed-sender flag for anonymous delivery mode
- zeroize added to server dependencies

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-26 22:45:34 +01:00
parent 4c1e4683e3
commit 553de3a2b7
23 changed files with 2791 additions and 33 deletions

View File

@@ -36,6 +36,7 @@ rcgen = { workspace = true }
opaque-ke = { workspace = true }
rand = { workspace = true }
subtle = { workspace = true }
zeroize = { workspace = true }
# Database
rusqlite = { workspace = true }

View File

@@ -1,3 +1,4 @@
use std::net::IpAddr;
use std::sync::Arc;
use dashmap::DashMap;
@@ -5,6 +6,7 @@ use quicnprotochat_proto::node_capnp::auth;
use sha2::Digest;
use subtle::ConstantTimeEq;
use tokio::sync::Notify;
use zeroize::Zeroizing;
use crate::error_codes::*;
@@ -13,19 +15,29 @@ pub const PENDING_LOGIN_TTL_SECS: u64 = 300; // 5 minutes
pub const RATE_LIMIT_WINDOW_SECS: u64 = 60;
pub const RATE_LIMIT_MAX_ENQUEUES: u32 = 100;
#[derive(Clone, Debug)]
#[derive(Clone)]
pub struct AuthConfig {
pub required_token: Option<Vec<u8>>,
/// Server bearer token — zeroized on drop to prevent memory disclosure.
pub required_token: Option<Zeroizing<Vec<u8>>>,
/// When true, a valid bearer token (no session) is accepted and the request's identity/key is used (dev/e2e only).
/// CLI flag: --allow-insecure-auth / QUICNPROTOCHAT_ALLOW_INSECURE_AUTH.
pub allow_insecure_identity_from_request: bool,
}
impl std::fmt::Debug for AuthConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("AuthConfig")
.field("required_token", &self.required_token.as_ref().map(|_| "[REDACTED]"))
.field("allow_insecure_identity_from_request", &self.allow_insecure_identity_from_request)
.finish()
}
}
impl AuthConfig {
pub fn new(required_token: Option<String>, allow_insecure_identity_from_request: bool) -> Self {
let required_token = required_token
.filter(|s| !s.is_empty())
.map(|s| s.into_bytes());
.map(|s| Zeroizing::new(s.into_bytes()));
Self {
required_token,
allow_insecure_identity_from_request,
@@ -133,7 +145,7 @@ pub fn validate_auth_context(
}
if let Some(expected) = &cfg.required_token {
if expected.len() == token.len() && bool::from(expected.ct_eq(&token)) {
if expected.len() == token.len() && bool::from(expected.as_slice().ct_eq(&token)) {
return Ok(AuthContext {
token,
identity_key: None,
@@ -216,6 +228,30 @@ pub fn waiter(waiters: &DashMap<Vec<u8>, Arc<Notify>>, recipient_key: &[u8]) ->
.clone()
}
pub const CONN_RATE_LIMIT_WINDOW_SECS: u64 = 60;
pub const CONN_RATE_LIMIT_MAX: u32 = 50;
/// Per-IP connection rate limiter. Returns `true` if the connection is allowed.
/// Per-IP connection rate limiter. Returns `true` if the connection is allowed.
///
/// Uses a fixed-window counter per source IP: the first connection in a fresh
/// window resets the count, subsequent ones are admitted until
/// `CONN_RATE_LIMIT_MAX` is reached. Stale entries are pruned by the
/// background cleanup task.
pub fn check_conn_rate_limit(
    conn_rate_limits: &DashMap<IpAddr, RateEntry>,
    ip: IpAddr,
) -> bool {
    let now = current_timestamp();
    // Entry guard holds the shard lock for the duration of the update.
    let mut slot = conn_rate_limits.entry(ip).or_insert(RateEntry {
        count: 0,
        window_start: now,
    });
    let window_expired = now - slot.window_start >= CONN_RATE_LIMIT_WINDOW_SECS;
    if window_expired {
        // Fresh window: this connection is the first, and always admitted.
        slot.window_start = now;
        slot.count = 1;
        return true;
    }
    slot.count += 1;
    slot.count <= CONN_RATE_LIMIT_MAX
}
/// SHA-256 fingerprint of `data`, returned as an owned 32-byte vector.
pub fn fingerprint(data: &[u8]) -> Vec<u8> {
sha2::Sha256::digest(data).to_vec()
}

View File

@@ -163,6 +163,9 @@ pub fn merge_config(args: &crate::Args, file: &FileConfig) -> EffectiveConfig {
}
pub fn validate_production_config(effective: &EffectiveConfig) -> anyhow::Result<()> {
if effective.allow_insecure_auth {
anyhow::bail!("production forbids --allow-insecure-auth");
}
let token = effective
.auth_token
.as_deref()
@@ -178,6 +181,12 @@ pub fn validate_production_config(effective: &EffectiveConfig) -> anyhow::Result
if effective.store_backend == "sql" && effective.db_key.is_empty() {
anyhow::bail!("production with store_backend=sql requires non-empty QUICNPROTOCHAT_DB_KEY");
}
if effective.store_backend != "sql" {
tracing::warn!(
"production is using file-backed storage; \
consider store_backend=sql with QUICNPROTOCHAT_DB_KEY for encryption at rest"
);
}
if !effective.tls_cert.exists() || !effective.tls_key.exists() {
anyhow::bail!(
"production requires existing TLS cert and key (no auto-generation); provide QUICNPROTOCHAT_TLS_CERT and QUICNPROTOCHAT_TLS_KEY"

View File

@@ -2,7 +2,7 @@
//!
//! The server hosts Authentication + Delivery services over QUIC + Cap'n Proto.
use std::{net::SocketAddr, path::PathBuf, sync::Arc};
use std::{net::IpAddr, net::SocketAddr, path::PathBuf, sync::Arc};
use anyhow::Context;
use clap::Parser;
@@ -167,7 +167,11 @@ async fn main() -> anyhow::Result<()> {
// Harden QUIC transport: idle timeout, limit stream concurrency.
let mut transport = quinn::TransportConfig::default();
transport.max_idle_timeout(Some(std::time::Duration::from_secs(300).try_into().unwrap()));
transport.max_idle_timeout(Some(
std::time::Duration::from_secs(300)
.try_into()
.expect("300s is a valid IdleTimeout"),
));
transport.max_concurrent_bidi_streams(1u32.into());
transport.max_concurrent_uni_streams(0u32.into());
server_config.transport_config(Arc::new(transport));
@@ -223,12 +227,14 @@ async fn main() -> anyhow::Result<()> {
let pending_logins: Arc<DashMap<String, PendingLogin>> = Arc::new(DashMap::new());
let sessions: Arc<DashMap<Vec<u8>, SessionInfo>> = Arc::new(DashMap::new());
let rate_limits: Arc<DashMap<Vec<u8>, RateEntry>> = Arc::new(DashMap::new());
let conn_rate_limits: Arc<DashMap<IpAddr, RateEntry>> = Arc::new(DashMap::new());
// Background cleanup task (expire sessions, pending logins, rate limits, and stale messages).
spawn_cleanup_task(
Arc::clone(&sessions),
Arc::clone(&pending_logins),
Arc::clone(&rate_limits),
Arc::clone(&conn_rate_limits),
Arc::clone(&store),
Arc::clone(&waiters),
);
@@ -254,6 +260,14 @@ async fn main() -> anyhow::Result<()> {
None => break,
};
// Per-IP connection rate limiting.
let remote_ip = incoming.remote_address().ip();
if !auth::check_conn_rate_limit(&conn_rate_limits, remote_ip) {
tracing::warn!(ip = %remote_ip, "connection rate limit exceeded, dropping");
incoming.refuse();
continue;
}
let connecting = match incoming.accept() {
Ok(c) => c,
Err(e) => {
@@ -298,6 +312,10 @@ async fn main() -> anyhow::Result<()> {
}
}
// Grace period: let in-flight RPC tasks on the LocalSet finish.
tracing::info!("waiting up to 5s for in-flight RPCs to complete");
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
Ok::<(), anyhow::Error>(())
})
.await?;

View File

@@ -92,12 +92,10 @@ impl NodeServiceImpl {
}
// When sealed_sender is true, enqueue does not require identity; valid token only.
// Otherwise, the sender must have an identity-bound session (but their identity
// does NOT need to match the recipient — they're sending TO the recipient).
if !self.sealed_sender {
if let Err(e) = require_identity_or_request(
&auth_ctx,
&recipient_key,
self.auth_cfg.allow_insecure_identity_from_request,
) {
if let Err(e) = crate::auth::require_identity(&auth_ctx) {
return Promise::err(e);
}
}
@@ -563,6 +561,36 @@ impl NodeServiceImpl {
return Promise::err(e);
}
// When sealed_sender is false, require an identity-bound session.
if !self.sealed_sender {
if let Err(e) = crate::auth::require_identity(&auth_ctx) {
return Promise::err(e);
}
}
// DM channel authz: validate caller membership once before the loop.
if channel_id.len() == 16 {
let members = match self.store.get_channel_members(&channel_id) {
Ok(Some(m)) => m,
Ok(None) => {
return Promise::err(coded_error(E023_CHANNEL_NOT_FOUND, "channel not found"));
}
Err(e) => return Promise::err(storage_err(e)),
};
let caller = match crate::auth::require_identity(&auth_ctx) {
Ok(id) => id,
Err(e) => return Promise::err(e),
};
let (a, b) = &members;
let caller_in = caller == a.as_slice() || caller == b.as_slice();
if !caller_in {
return Promise::err(coded_error(
E022_CHANNEL_ACCESS_DENIED,
"caller is not a member of this channel",
));
}
}
let mut seqs = Vec::with_capacity(recipient_keys.len() as usize);
for i in 0..recipient_keys.len() {
let rk = match recipient_keys.get(i) {
@@ -576,6 +604,33 @@ impl NodeServiceImpl {
));
}
// Per-recipient DM channel membership check.
if channel_id.len() == 16 {
let members = match self.store.get_channel_members(&channel_id) {
Ok(Some(m)) => m,
Ok(None) => {
return Promise::err(coded_error(
E023_CHANNEL_NOT_FOUND,
"channel not found",
));
}
Err(e) => return Promise::err(storage_err(e)),
};
let caller = match crate::auth::require_identity(&auth_ctx) {
Ok(id) => id,
Err(e) => return Promise::err(e),
};
let (a, b) = &members;
let recipient_other = (rk == *a && caller == b.as_slice())
|| (rk == *b && caller == a.as_slice());
if !recipient_other {
return Promise::err(coded_error(
E022_CHANNEL_ACCESS_DENIED,
"recipient is not a member of this channel",
));
}
}
match self.store.queue_depth(&rk, &channel_id) {
Ok(depth) if depth >= MAX_QUEUE_DEPTH => {
return Promise::err(coded_error(

View File

@@ -214,10 +214,10 @@ impl NodeServiceImpl {
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
};
let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
Ok(ctx) => ctx,
Err(e) => return Promise::err(e),
};
// Auth check only — any authenticated user can fetch any peer's hybrid public key.
if let Err(e) = validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
return Promise::err(e);
}
if identity_key.len() != 32 {
return Promise::err(coded_error(
@@ -226,14 +226,6 @@ impl NodeServiceImpl {
));
}
if let Err(e) = require_identity_or_request(
&auth_ctx,
&identity_key,
self.auth_cfg.allow_insecure_identity_from_request,
) {
return Promise::err(e);
}
let hybrid_pk = match self
.store
.fetch_hybrid_key(&identity_key)

View File

@@ -23,6 +23,7 @@ mod channel_ops;
mod delivery;
mod key_ops;
mod p2p_ops;
mod user_ops;
impl node_service::Server for NodeServiceImpl {
fn upload_key_package(
@@ -176,6 +177,22 @@ impl node_service::Server for NodeServiceImpl {
) -> capnp::capability::Promise<(), capnp::Error> {
self.handle_create_channel(params, results)
}
fn resolve_user(
&mut self,
params: node_service::ResolveUserParams,
results: node_service::ResolveUserResults,
) -> capnp::capability::Promise<(), capnp::Error> {
self.handle_resolve_user(params, results)
}
fn resolve_identity(
&mut self,
params: node_service::ResolveIdentityParams,
results: node_service::ResolveIdentityResults,
) -> capnp::capability::Promise<(), capnp::Error> {
self.handle_resolve_identity(params, results)
}
}
pub const CURRENT_WIRE_VERSION: u16 = 1;
@@ -268,6 +285,7 @@ pub fn spawn_cleanup_task(
sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
pending_logins: Arc<DashMap<String, PendingLogin>>,
rate_limits: Arc<DashMap<Vec<u8>, RateEntry>>,
conn_rate_limits: Arc<DashMap<std::net::IpAddr, RateEntry>>,
store: Arc<dyn Store>,
waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
) {
@@ -280,6 +298,9 @@ pub fn spawn_cleanup_task(
sessions.retain(|_, info| info.expires_at > now);
pending_logins.retain(|_, pl| now - pl.created_at < PENDING_LOGIN_TTL_SECS);
rate_limits.retain(|_, entry| now - entry.window_start < RATE_LIMIT_WINDOW_SECS * 2);
conn_rate_limits.retain(|_, entry| {
now - entry.window_start < crate::auth::CONN_RATE_LIMIT_WINDOW_SECS * 2
});
// Bound map sizes to prevent unbounded growth from malicious clients.
const MAX_SESSIONS: usize = 100_000;

View File

@@ -0,0 +1,94 @@
//! resolveUser / resolveIdentity RPCs: bidirectional username ↔ identity key lookup.
use capnp::capability::Promise;
use quicnprotochat_proto::node_capnp::node_service;
use crate::auth::{coded_error, validate_auth_context};
use crate::error_codes::*;
use crate::storage::StorageError;
use super::NodeServiceImpl;
/// Map a storage-layer failure onto the E009 wire error code.
fn storage_err(err: StorageError) -> capnp::Error {
coded_error(E009_STORAGE_ERROR, err)
}
impl NodeServiceImpl {
    /// resolveUser RPC: forward lookup — username → registered identity key.
    ///
    /// Requires a valid auth context (any authenticated caller). On success
    /// the 32-byte identity key is written to the results; an unknown
    /// username yields empty Data, which callers detect by length.
    pub fn handle_resolve_user(
        &mut self,
        params: node_service::ResolveUserParams,
        mut results: node_service::ResolveUserResults,
    ) -> Promise<(), capnp::Error> {
        let p = match params.get() {
            Ok(p) => p,
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        let username = match p.get_username() {
            Ok(u) => u,
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        // Auth check only — the context itself is unused (matches the
        // check-only style used by the other read-only handlers).
        if let Err(e) = validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
            return Promise::err(e);
        }
        let username_str = match username.to_str() {
            Ok(s) => s,
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        if username_str.is_empty() {
            return Promise::err(coded_error(E020_BAD_PARAMS, "username must not be empty"));
        }
        match self.store.get_user_identity_key(username_str) {
            Ok(Some(key)) => {
                results.get().set_identity_key(&key);
            }
            Ok(None) => {
                // Return empty Data — caller checks length to detect "not found".
            }
            Err(e) => return Promise::err(storage_err(e)),
        }
        Promise::ok(())
    }

    /// resolveIdentity RPC: reverse lookup — identity key → registered username.
    ///
    /// Requires a valid auth context and an exactly-32-byte identity key.
    /// An unknown key yields an empty string result, which callers detect by
    /// length.
    pub fn handle_resolve_identity(
        &mut self,
        params: node_service::ResolveIdentityParams,
        mut results: node_service::ResolveIdentityResults,
    ) -> Promise<(), capnp::Error> {
        let p = match params.get() {
            Ok(p) => p,
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        let identity_key = match p.get_identity_key() {
            Ok(v) => v,
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        };
        // Auth check only — the context itself is unused.
        if let Err(e) = validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
            return Promise::err(e);
        }
        if identity_key.len() != 32 {
            return Promise::err(coded_error(
                E004_IDENTITY_KEY_LENGTH,
                format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
            ));
        }
        match self.store.resolve_identity_key(identity_key) {
            Ok(Some(username)) => {
                results.get().set_username(&username);
            }
            Ok(None) => {
                // Return empty string — caller checks length to detect "not found".
            }
            Err(e) => return Promise::err(storage_err(e)),
        }
        Promise::ok(())
    }
}

View File

@@ -369,6 +369,17 @@ impl Store for SqlStore {
.map_err(|e| StorageError::Db(e.to_string()))
}
/// Reverse lookup: resolve an identity key to the registered username.
///
/// Returns `Ok(None)` when no row matches; database failures are surfaced
/// as `StorageError::Db`.
fn resolve_identity_key(&self, identity_key: &[u8]) -> Result<Option<String>, StorageError> {
    let conn = self.lock_conn()?;
    let sql = "SELECT username FROM user_identity_keys WHERE identity_key = ?1";
    let mut query = conn
        .prepare(sql)
        .map_err(|e| StorageError::Db(e.to_string()))?;
    let row = query
        .query_row(params![identity_key], |row| row.get(0))
        .optional();
    row.map_err(|e| StorageError::Db(e.to_string()))
}
fn peek(
&self,
recipient_key: &[u8],

View File

@@ -100,6 +100,9 @@ pub trait Store: Send + Sync {
/// Retrieve identity key for a user (Fix 2).
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;
/// Reverse lookup: resolve an identity key to the registered username.
fn resolve_identity_key(&self, identity_key: &[u8]) -> Result<Option<String>, StorageError>;
/// Peek at queued messages without removing them (non-destructive).
/// Returns `(seq, payload)` pairs ordered by seq.
fn peek(
@@ -546,6 +549,16 @@ impl Store for FileBackedStore {
Ok(map.get(username).cloned())
}
/// Reverse lookup: resolve an identity key to the registered username.
///
/// The file-backed store keeps only a username → key map, so this is a
/// linear scan over all entries; first match (map iteration order) wins.
fn resolve_identity_key(&self, identity_key: &[u8]) -> Result<Option<String>, StorageError> {
    let map = lock(&self.identity_keys)?;
    let hit = map
        .iter()
        .find(|(_, ik)| ik.as_slice() == identity_key)
        .map(|(username, _)| username.clone());
    Ok(hit)
}
fn peek(
&self,
recipient_key: &[u8],