feat: Sprint 10+11 — privacy hardening and multi-device support

Privacy Hardening (Sprint 10):
- Server --redact-logs flag: SHA-256 hashed identity prefixes in audit
  logs, payload_len omitted when enabled
- Client /privacy command suite: redact-keys on|off, auto-clear with
  duration parsing, padding on|off for traffic analysis resistance
- Forward secrecy: /verify-fs checks MLS epoch advancement,
  /rotate-all-keys rotates MLS leaf + hybrid KEM keypair
- Dummy message type (0x09): constant-rate traffic padding every 30s,
  silently discarded by recipients, serialize_dummy() + parse support
- delete_messages_before() for auto-clear in ConversationStore

Multi-Device Support (Sprint 11):
- Device registry: registerDevice @24, listDevices @25, revokeDevice @26
  RPCs with Device struct (deviceId, deviceName, registeredAt)
- Server storage: devices table (migration 008), max 5 per identity,
  E029_DEVICE_LIMIT and E030_DEVICE_NOT_FOUND error codes
- Device cleanup integrated into deleteAccount transaction
- Client REPL: /devices, /register-device <name>, /revoke-device <id>

72 core + 35 server tests pass.
This commit is contained in:
2026-03-04 01:55:23 +01:00
parent 1b61b7ee8f
commit 9244e80ec7
16 changed files with 958 additions and 45 deletions

View File

@@ -0,0 +1,8 @@
-- Device registry (migration 008): one row per (identity, device) pair.
-- The per-identity cap (5 devices, E029) is enforced in the RPC layer,
-- not by the schema.
CREATE TABLE IF NOT EXISTS devices (
identity_key BLOB NOT NULL,    -- owning identity's public key
device_id BLOB NOT NULL,       -- client-chosen opaque device identifier
device_name TEXT NOT NULL DEFAULT '',  -- human-readable label, may be empty
registered_at INTEGER NOT NULL DEFAULT (strftime('%s','now')),  -- unix seconds, set at insert
PRIMARY KEY (identity_key, device_id)  -- prevents duplicate registrations
);
-- Speeds up listDevices / device_count lookups, which filter on identity_key only.
CREATE INDEX IF NOT EXISTS idx_devices_identity ON devices(identity_key);

View File

@@ -33,6 +33,9 @@ pub struct FileConfig {
pub federation: Option<FederationFileConfig>,
/// Directory containing plugin `.so` / `.dylib` files to load at startup.
pub plugin_dir: Option<PathBuf>,
/// When true, audit logs hash identity key prefixes and omit payload sizes.
#[serde(default)]
pub redact_logs: Option<bool>,
}
#[derive(Debug)]
@@ -55,6 +58,8 @@ pub struct EffectiveConfig {
pub federation: Option<EffectiveFederationConfig>,
/// Directory to scan for plugin `.so` / `.dylib` files at startup. None = no plugins.
pub plugin_dir: Option<PathBuf>,
/// When true, audit logs hash identity key prefixes and omit payload sizes.
pub redact_logs: bool,
}
#[derive(Debug, Default, Deserialize)]
@@ -219,6 +224,7 @@ pub fn merge_config(args: &crate::Args, file: &FileConfig) -> EffectiveConfig {
};
let plugin_dir = args.plugin_dir.clone().or_else(|| file.plugin_dir.clone());
let redact_logs = args.redact_logs || file.redact_logs.unwrap_or(false);
EffectiveConfig {
listen,
@@ -235,6 +241,7 @@ pub fn merge_config(args: &crate::Args, file: &FileConfig) -> EffectiveConfig {
metrics_enabled,
federation,
plugin_dir,
redact_logs,
}
}

View File

@@ -31,6 +31,8 @@ pub const E025_BLOB_HASH_LENGTH: &str = "E025";
pub const E026_BLOB_HASH_MISMATCH: &str = "E026";
pub const E027_BLOB_NOT_FOUND: &str = "E027";
pub const E028_ACCOUNT_DELETION_FAILED: &str = "E028";
/// Device registry: the identity already has the maximum number of registered devices.
pub const E029_DEVICE_LIMIT: &str = "E029";
/// Device registry: revoke targeted a device that is not registered for this identity.
pub const E030_DEVICE_NOT_FOUND: &str = "E030";
/// Build a `capnp::Error::failed()` with the structured code prefix.
pub fn coded_error(code: &str, msg: impl std::fmt::Display) -> capnp::Error {

View File

@@ -115,6 +115,10 @@ struct Args {
/// Each library must export `extern "C" fn qpq_plugin_init(vtable: *mut HookVTable) -> i32`.
#[arg(long, env = "QPQ_PLUGIN_DIR")]
plugin_dir: Option<PathBuf>,
/// Redact identity key prefixes and payload sizes in audit logs for metadata minimization.
#[arg(long, env = "QPQ_REDACT_LOGS", default_value_t = false)]
redact_logs: bool,
}
// ── Entry point ───────────────────────────────────────────────────────────────
@@ -599,6 +603,7 @@ async fn main() -> anyhow::Result<()> {
let conn_hooks = Arc::clone(&hooks);
let conn_kt_log = Arc::clone(&kt_log);
let conn_data_dir = PathBuf::from(&effective.data_dir);
let conn_redact_logs = effective.redact_logs;
tokio::task::spawn_local(async move {
if let Err(e) = handle_node_connection(
@@ -617,6 +622,7 @@ async fn main() -> anyhow::Result<()> {
conn_hooks,
conn_kt_log,
conn_data_dir,
conn_redact_logs,
)
.await
{

View File

@@ -21,6 +21,12 @@ use crate::hooks::{HookAction, MessageEvent, FetchEvent};
// Audit events here must not include secrets: no payload content, no full recipient/token bytes (prefix only).
/// Redacted identifier for a key: the first 4 bytes of the key's SHA-256
/// digest, hex-encoded. Used in audit logs instead of raw key prefixes so
/// log lines cannot be linked back to the key material directly.
fn redacted_prefix(key: &[u8]) -> String {
    fmt_hex(&Sha256::digest(key)[..4])
}
const MAX_PAYLOAD_BYTES: usize = 5 * 1024 * 1024; // 5 MB cap per message
const MAX_QUEUE_DEPTH: usize = 1000;
@@ -219,7 +225,7 @@ impl NodeServiceImpl {
// Hook: on_message_enqueue — fires after validation, before storage.
let hook_event = MessageEvent {
sender_identity,
sender_identity: sender_identity.clone(),
recipient_key: recipient_key.clone(),
channel_id: channel_id.clone(),
payload_len,
@@ -254,13 +260,26 @@ impl NodeServiceImpl {
if let Ok(depth) = self.store.queue_depth(&recipient_key, &channel_id) {
metrics::record_delivery_queue_depth(depth);
}
tracing::info!(
sender_prefix = sender_prefix.as_deref().unwrap_or("sealed"),
recipient_prefix = %fmt_hex(&recipient_key[..4]),
payload_len = payload_len,
seq = seq,
"audit: enqueue"
);
if self.redact_logs {
let redacted_sender = sender_identity
.as_deref()
.map(|id| redacted_prefix(id))
.unwrap_or_else(|| "sealed".to_string());
tracing::info!(
sender_prefix = %redacted_sender,
recipient_prefix = %redacted_prefix(&recipient_key),
seq = seq,
"audit: enqueue"
);
} else {
tracing::info!(
sender_prefix = sender_prefix.as_deref().unwrap_or("sealed"),
recipient_prefix = %fmt_hex(&recipient_key[..4]),
payload_len = payload_len,
seq = seq,
"audit: enqueue"
);
}
crate::auth::waiter(&self.waiters, &recipient_key).notify_waiters();
@@ -380,11 +399,19 @@ impl NodeServiceImpl {
// Audit: fetch — do not log payload or full keys.
metrics::record_fetch_total();
tracing::info!(
recipient_prefix = %fmt_hex(&recipient_key[..4]),
count = messages.len(),
"audit: fetch"
);
if self.redact_logs {
tracing::info!(
recipient_prefix = %redacted_prefix(&recipient_key),
count = messages.len(),
"audit: fetch"
);
} else {
tracing::info!(
recipient_prefix = %fmt_hex(&recipient_key[..4]),
count = messages.len(),
"audit: fetch"
);
}
let mut list = results.get().init_payloads(messages.len() as u32);
for (i, (seq, data)) in messages.iter().enumerate() {
@@ -546,11 +573,19 @@ impl NodeServiceImpl {
Err(e) => return Promise::err(e),
};
tracing::info!(
recipient_prefix = %fmt_hex(&recipient_key[..4]),
count = messages.len(),
"audit: peek"
);
if self.redact_logs {
tracing::info!(
recipient_prefix = %redacted_prefix(&recipient_key),
count = messages.len(),
"audit: peek"
);
} else {
tracing::info!(
recipient_prefix = %fmt_hex(&recipient_key[..4]),
count = messages.len(),
"audit: peek"
);
}
let mut list = results.get().init_payloads(messages.len() as u32);
for (i, (seq, data)) in messages.iter().enumerate() {
@@ -610,12 +645,21 @@ impl NodeServiceImpl {
.map_err(storage_err)
{
Ok(removed) => {
tracing::info!(
recipient_prefix = %fmt_hex(&recipient_key[..4]),
seq_up_to = seq_up_to,
removed = removed,
"audit: ack"
);
if self.redact_logs {
tracing::info!(
recipient_prefix = %redacted_prefix(&recipient_key),
seq_up_to = seq_up_to,
removed = removed,
"audit: ack"
);
} else {
tracing::info!(
recipient_prefix = %fmt_hex(&recipient_key[..4]),
seq_up_to = seq_up_to,
removed = removed,
"audit: ack"
);
}
}
Err(e) => return Promise::err(e),
}
@@ -778,6 +822,7 @@ impl NodeServiceImpl {
let fed_client = self.federation_client.clone();
let local_domain = self.local_domain.clone();
let hooks = Arc::clone(&self.hooks);
let redact_logs = self.redact_logs;
// Use an async future to support federation relay alongside local enqueue.
// All storage operations are synchronous; only federation relay calls are await-ed.
@@ -839,12 +884,19 @@ impl NodeServiceImpl {
// Hook: on_batch_enqueue — fires after all messages are stored.
hooks.on_batch_enqueue(&hook_events);
tracing::info!(
sender_prefix = sender_prefix.as_deref().unwrap_or("sealed"),
recipient_count = n,
payload_len = payload.len(),
"audit: batch_enqueue"
);
if redact_logs {
tracing::info!(
recipient_count = n,
"audit: batch_enqueue"
);
} else {
tracing::info!(
sender_prefix = sender_prefix.as_deref().unwrap_or("sealed"),
recipient_count = n,
payload_len = payload.len(),
"audit: batch_enqueue"
);
}
Ok(())
})

View File

@@ -0,0 +1,151 @@
//! Device registry RPC handlers: registerDevice, listDevices, revokeDevice.
use capnp::capability::Promise;
use quicproquo_proto::node_capnp::node_service;
use crate::auth::{coded_error, require_identity, validate_auth_context};
use crate::error_codes::*;
use crate::storage::StorageError;
use super::NodeServiceImpl;
const MAX_DEVICES_PER_IDENTITY: usize = 5;
/// Wrap a storage-layer failure in the generic E009 structured-code capnp error.
fn storage_err(err: StorageError) -> capnp::Error {
    coded_error(E009_STORAGE_ERROR, err)
}
impl NodeServiceImpl {
/// registerDevice RPC: register a device for the authenticated identity.
///
/// Enforces the MAX_DEVICES_PER_IDENTITY cap (E029). A duplicate
/// (identity, device_id) pair yields `success = false` rather than an
/// error, so repeated registration is idempotent for the caller.
pub fn handle_register_device(
    &mut self,
    params: node_service::RegisterDeviceParams,
    mut results: node_service::RegisterDeviceResults,
) -> Promise<(), capnp::Error> {
    // Malformed capnp params are a client error (E020).
    let p = match params.get() {
        Ok(p) => p,
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    // Devices are scoped per identity, so auth must resolve to an identity key.
    let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
        Ok(ctx) => ctx,
        Err(e) => return Promise::err(e),
    };
    let identity_key = match require_identity(&auth_ctx) {
        Ok(ik) => ik.to_vec(),
        Err(e) => return Promise::err(e),
    };
    let device_id = match p.get_device_id() {
        Ok(v) => v.to_vec(),
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    if device_id.is_empty() {
        return Promise::err(coded_error(E020_BAD_PARAMS, "deviceId must not be empty"));
    }
    // Device name is best-effort: invalid UTF-8 degrades to "" instead of failing.
    let device_name = match p.get_device_name() {
        Ok(n) => n.to_str().unwrap_or("").to_string(),
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    // Check device limit.
    // NOTE(review): count-then-insert is not atomic; two racing registrations
    // could briefly exceed the cap. Acceptable if the RPC service is driven
    // single-threaded (spawn_local) — TODO confirm.
    match self.store.device_count(&identity_key) {
        Ok(count) if count >= MAX_DEVICES_PER_IDENTITY => {
            return Promise::err(coded_error(
                E029_DEVICE_LIMIT,
                format!("maximum {MAX_DEVICES_PER_IDENTITY} devices per identity"),
            ));
        }
        Err(e) => return Promise::err(storage_err(e)),
        _ => {}
    }
    // register_device returns false when the pair already exists (idempotent).
    match self.store.register_device(&identity_key, &device_id, &device_name) {
        Ok(success) => {
            results.get().set_success(success);
            Promise::ok(())
        }
        Err(e) => Promise::err(storage_err(e)),
    }
}
/// listDevices RPC: return every device registered to the authenticated
/// identity as (deviceId, deviceName, registeredAt) entries.
pub fn handle_list_devices(
    &mut self,
    params: node_service::ListDevicesParams,
    mut results: node_service::ListDevicesResults,
) -> Promise<(), capnp::Error> {
    // Decode params; malformed input is a client error.
    let reader = match params.get() {
        Ok(r) => r,
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    // Authenticate and require a bound identity key.
    let ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, reader.get_auth()) {
        Ok(c) => c,
        Err(e) => return Promise::err(e),
    };
    let owner = match require_identity(&ctx) {
        Ok(ik) => ik.to_vec(),
        Err(e) => return Promise::err(e),
    };
    let rows = match self.store.list_devices(&owner) {
        Ok(rows) => rows,
        Err(e) => return Promise::err(storage_err(e)),
    };
    // Copy each stored tuple into the capnp result list.
    let mut out = results.get().init_devices(rows.len() as u32);
    for (idx, (device_id, device_name, registered_at)) in rows.iter().enumerate() {
        let mut slot = out.reborrow().get(idx as u32);
        slot.set_device_id(device_id);
        slot.set_device_name(device_name);
        slot.set_registered_at(*registered_at);
    }
    Promise::ok(())
}
/// revokeDevice RPC: remove one registered device for the authenticated
/// identity. Fails with E030 when the device is not registered.
pub fn handle_revoke_device(
    &mut self,
    params: node_service::RevokeDeviceParams,
    mut results: node_service::RevokeDeviceResults,
) -> Promise<(), capnp::Error> {
    let reader = match params.get() {
        Ok(r) => r,
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    let ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, reader.get_auth()) {
        Ok(c) => c,
        Err(e) => return Promise::err(e),
    };
    let owner = match require_identity(&ctx) {
        Ok(ik) => ik.to_vec(),
        Err(e) => return Promise::err(e),
    };
    let device_id = match reader.get_device_id() {
        Ok(v) => v.to_vec(),
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    if device_id.is_empty() {
        return Promise::err(coded_error(E020_BAD_PARAMS, "deviceId must not be empty"));
    }
    // Ok(false) from the store means no matching row existed — map to E030.
    match self.store.revoke_device(&owner, &device_id) {
        Err(e) => Promise::err(storage_err(e)),
        Ok(false) => Promise::err(coded_error(E030_DEVICE_NOT_FOUND, "device not found")),
        Ok(true) => {
            results.get().set_success(true);
            Promise::ok(())
        }
    }
}
}

View File

@@ -25,6 +25,7 @@ mod auth_ops;
mod blob_ops;
mod channel_ops;
mod delivery;
mod device_ops;
mod key_ops;
mod p2p_ops;
mod user_ops;
@@ -221,6 +222,30 @@ impl node_service::Server for NodeServiceImpl {
) -> capnp::capability::Promise<(), capnp::Error> {
self.handle_delete_account(params, results)
}
/// Capnp trait entry point for registerDevice; delegates to device_ops.
fn register_device(
    &mut self,
    params: node_service::RegisterDeviceParams,
    results: node_service::RegisterDeviceResults,
) -> capnp::capability::Promise<(), capnp::Error> {
    self.handle_register_device(params, results)
}
/// Capnp trait entry point for listDevices; delegates to device_ops.
fn list_devices(
    &mut self,
    params: node_service::ListDevicesParams,
    results: node_service::ListDevicesResults,
) -> capnp::capability::Promise<(), capnp::Error> {
    self.handle_list_devices(params, results)
}
/// Capnp trait entry point for revokeDevice; delegates to device_ops.
fn revoke_device(
    &mut self,
    params: node_service::RevokeDeviceParams,
    results: node_service::RevokeDeviceResults,
) -> capnp::capability::Promise<(), capnp::Error> {
    self.handle_revoke_device(params, results)
}
}
pub const CURRENT_WIRE_VERSION: u16 = 1;
@@ -247,6 +272,8 @@ pub struct NodeServiceImpl {
pub kt_log: Arc<std::sync::Mutex<MerkleLog>>,
/// Server data directory (used for blob storage).
pub data_dir: PathBuf,
/// When true, hash identity key prefixes and omit payload sizes in audit logs.
pub redact_logs: bool,
}
impl NodeServiceImpl {
@@ -266,6 +293,7 @@ impl NodeServiceImpl {
hooks: Arc<dyn crate::hooks::ServerHooks>,
kt_log: Arc<std::sync::Mutex<MerkleLog>>,
data_dir: PathBuf,
redact_logs: bool,
) -> Self {
Self {
store,
@@ -282,6 +310,7 @@ impl NodeServiceImpl {
signing_key,
kt_log,
data_dir,
redact_logs,
}
}
}
@@ -303,6 +332,7 @@ pub async fn handle_node_connection(
hooks: Arc<dyn crate::hooks::ServerHooks>,
kt_log: Arc<std::sync::Mutex<MerkleLog>>,
data_dir: PathBuf,
redact_logs: bool,
) -> Result<(), anyhow::Error> {
let connection = connecting.await?;
@@ -338,6 +368,7 @@ pub async fn handle_node_connection(
hooks,
kt_log,
data_dir,
redact_logs,
));
RpcSystem::new(Box::new(network), Some(service.client))

View File

@@ -9,7 +9,7 @@ use rusqlite::{params, Connection};
use crate::storage::{StorageError, Store};
/// Schema version after introducing the migration runner (existing DBs had 1).
const SCHEMA_VERSION: i32 = 8;
const SCHEMA_VERSION: i32 = 9;
/// Migrations: (migration_number, SQL). Files named NNN_name.sql, applied in order when N > user_version.
const MIGRATIONS: &[(i32, &str)] = &[
@@ -20,6 +20,7 @@ const MIGRATIONS: &[(i32, &str)] = &[
(6, include_str!("../migrations/005_signing_key.sql")),
(7, include_str!("../migrations/006_kt_log.sql")),
(8, include_str!("../migrations/007_add_expiry.sql")),
(9, include_str!("../migrations/008_devices.sql")),
];
/// Runs pending migrations on an open connection: applies any migration whose number is greater
@@ -693,6 +694,12 @@ impl Store for SqlStore {
params![identity_key],
);
// 8. Delete devices.
let _ = conn.execute(
"DELETE FROM devices WHERE identity_key = ?1",
params![identity_key],
);
// Do NOT delete KT log entries — append-only for auditability.
Ok(())
@@ -710,6 +717,69 @@ impl Store for SqlStore {
}
}
}
/// Register `device_id` under `identity_key`; `registered_at` comes from the
/// table default (current unix time).
///
/// Returns `Ok(false)` when the (identity, device) pair already exists,
/// `Ok(true)` on a fresh insert. Uses a single atomic `INSERT OR IGNORE`
/// against the composite primary key instead of the previous
/// SELECT-EXISTS-then-INSERT pair: the two-statement form made an extra
/// round-trip and had a race window in which a concurrent insert of the same
/// pair surfaced as a UNIQUE-constraint `Db` error rather than `Ok(false)`.
fn register_device(&self, identity_key: &[u8], device_id: &[u8], device_name: &str) -> Result<bool, StorageError> {
    let conn = self.lock_conn()?;
    // OR IGNORE makes a duplicate primary key a no-op; `execute` then
    // reports 0 changed rows, which we translate to `false`.
    let inserted = conn
        .execute(
            "INSERT OR IGNORE INTO devices (identity_key, device_id, device_name) VALUES (?1, ?2, ?3)",
            params![identity_key, device_id, device_name],
        )
        .map_err(|e| StorageError::Db(e.to_string()))?;
    Ok(inserted > 0)
}
/// Return all devices for `identity_key` as (device_id, name, registered_at),
/// oldest registration first.
fn list_devices(&self, identity_key: &[u8]) -> Result<Vec<(Vec<u8>, String, u64)>, StorageError> {
    let conn = self.lock_conn()?;
    let mut stmt = conn
        .prepare("SELECT device_id, device_name, registered_at FROM devices WHERE identity_key = ?1 ORDER BY registered_at ASC")
        .map_err(|e| StorageError::Db(e.to_string()))?;
    let mut rows = stmt
        .query(params![identity_key])
        .map_err(|e| StorageError::Db(e.to_string()))?;
    // Walk the cursor row by row rather than collecting via query_map.
    let mut devices = Vec::new();
    while let Some(row) = rows.next().map_err(|e| StorageError::Db(e.to_string()))? {
        let device_id: Vec<u8> = row.get(0).map_err(|e| StorageError::Db(e.to_string()))?;
        let device_name: String = row.get(1).map_err(|e| StorageError::Db(e.to_string()))?;
        // Stored as INTEGER (i64); exposed to callers as u64.
        let registered_at: i64 = row.get(2).map_err(|e| StorageError::Db(e.to_string()))?;
        devices.push((device_id, device_name, registered_at as u64));
    }
    Ok(devices)
}
/// Remove one device registration; `Ok(true)` iff a matching row was deleted.
fn revoke_device(&self, identity_key: &[u8], device_id: &[u8]) -> Result<bool, StorageError> {
    let conn = self.lock_conn()?;
    match conn.execute(
        "DELETE FROM devices WHERE identity_key = ?1 AND device_id = ?2",
        params![identity_key, device_id],
    ) {
        // execute reports the number of rows affected by the DELETE.
        Ok(affected) => Ok(affected > 0),
        Err(e) => Err(StorageError::Db(e.to_string())),
    }
}
/// Number of devices currently registered for `identity_key`.
fn device_count(&self, identity_key: &[u8]) -> Result<usize, StorageError> {
    let conn = self.lock_conn()?;
    conn.query_row(
        "SELECT COUNT(*) FROM devices WHERE identity_key = ?1",
        params![identity_key],
        |row| row.get::<_, i64>(0),
    )
    .map(|n| n as usize)
    .map_err(|e| StorageError::Db(e.to_string()))
}
}
/// Convenience extension for `rusqlite::OptionalExtension`.

View File

@@ -184,6 +184,21 @@ pub trait Store: Send + Sync {
/// user identity key mapping, and the user record itself.
/// Does NOT delete KT log entries (append-only for auditability).
fn delete_account(&self, identity_key: &[u8]) -> Result<(), StorageError>;
// ── Device registry ─────────────────────────────────────────────────────
/// Register a device for an identity. Returns false if the device already exists.
/// Caller must check device_count < 5 before calling.
fn register_device(&self, identity_key: &[u8], device_id: &[u8], device_name: &str) -> Result<bool, StorageError>;
/// List all registered devices for an identity: (device_id, name, registered_at).
fn list_devices(&self, identity_key: &[u8]) -> Result<Vec<(Vec<u8>, String, u64)>, StorageError>;
/// Revoke (remove) a registered device. Returns false if not found.
fn revoke_device(&self, identity_key: &[u8], device_id: &[u8]) -> Result<bool, StorageError>;
/// Return the number of registered devices for an identity.
fn device_count(&self, identity_key: &[u8]) -> Result<usize, StorageError>;
}
// ── ChannelKey ───────────────────────────────────────────────────────────────
@@ -247,6 +262,8 @@ pub struct FileBackedStore {
users: Mutex<HashMap<String, Vec<u8>>>,
identity_keys: Mutex<HashMap<String, Vec<u8>>>,
endpoints: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
/// Device registry: identity_key -> Vec<(device_id, device_name, registered_at)>
devices: Mutex<HashMap<Vec<u8>, Vec<(Vec<u8>, String, u64)>>>,
}
impl FileBackedStore {
@@ -289,6 +306,7 @@ impl FileBackedStore {
users,
identity_keys,
endpoints: Mutex::new(HashMap::new()),
devices: Mutex::new(HashMap::new()),
})
}
@@ -837,8 +855,49 @@ impl Store for FileBackedStore {
ep.remove(identity_key);
}
// Remove devices.
{
let mut dev = lock(&self.devices)?;
dev.remove(identity_key);
}
Ok(())
}
/// In-memory device registration. Returns `Ok(false)` for a duplicate
/// device_id under the same identity, `Ok(true)` after appending the entry.
fn register_device(&self, identity_key: &[u8], device_id: &[u8], device_name: &str) -> Result<bool, StorageError> {
    let mut registry = lock(&self.devices)?;
    let entries = registry.entry(identity_key.to_vec()).or_default();
    // Reject duplicates so the per-identity list stays keyed by device_id.
    if entries.iter().any(|(existing_id, _, _)| existing_id == device_id) {
        return Ok(false);
    }
    // Timestamp in unix seconds; a pre-epoch clock degrades to 0.
    let registered_at = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map(|d| d.as_secs())
        .unwrap_or(0);
    entries.push((device_id.to_vec(), device_name.to_string(), registered_at));
    Ok(true)
}
/// Snapshot of all devices for `identity_key` (empty when none registered).
fn list_devices(&self, identity_key: &[u8]) -> Result<Vec<(Vec<u8>, String, u64)>, StorageError> {
    let registry = lock(&self.devices)?;
    match registry.get(identity_key) {
        Some(entries) => Ok(entries.clone()),
        None => Ok(Vec::new()),
    }
}
/// Remove a device entry in memory; `Ok(true)` iff something was removed.
fn revoke_device(&self, identity_key: &[u8], device_id: &[u8]) -> Result<bool, StorageError> {
    let mut registry = lock(&self.devices)?;
    match registry.get_mut(identity_key) {
        None => Ok(false),
        Some(entries) => {
            // Compare lengths around retain() to learn whether a match existed.
            let original_len = entries.len();
            entries.retain(|(id, _, _)| id != device_id);
            Ok(entries.len() != original_len)
        }
    }
}
/// Number of in-memory device entries for `identity_key`.
fn device_count(&self, identity_key: &[u8]) -> Result<usize, StorageError> {
    let registry = lock(&self.devices)?;
    Ok(registry.get(identity_key).map_or(0, Vec::len))
}
}
#[cfg(test)]