Phase 1 — Foundation: - Constant-time token comparison via subtle::ConstantTimeEq (Fix 11) - Structured error codes E001–E020 in new error_codes.rs (Fix 15) - Remove dead envelope.capnp code and related types (Fix 16) Phase 2 — Auth Hardening: - Registration collision check via has_user_record() (Fix 5) - Auth required on uploadHybridKey/fetchHybridKey RPCs (Fix 1) - Identity-token binding at registration and login (Fix 2) - Session token expiry with 24h TTL and background reaper (Fix 3) - Bounded pending logins with 5-minute timeout (Fix 4) Phase 3 — Resource Limits: - Rate limiting: 100 enqueues/60s per token (Fix 6) - Queue depth cap at 1000 + 7-day message TTL/GC (Fix 7) - Partial queue drain via limit param on fetch/fetchWait (Fix 8) Phase 4 — Crypto Fixes: - OPAQUE KSF switched from Identity to Argon2id (Fix 10) - Random AEAD nonce in hybrid KEM instead of HKDF-derived (Fix 12) - Zeroize secret fields in HybridKeypairBytes (Fix 13) - Encrypted client state files via QPCE format (Fix 9) Phase 5 — Protocol: - Commit fan-out to all existing members on invite (Fix 14) - Add member_identities() to GroupMember Breaking: existing OPAQUE registrations invalidated (Argon2 KSF). Schema: added auth to hybrid key ops, identityKey to OPAQUE finish RPCs, limit to fetch/fetchWait. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
485 lines
16 KiB
Rust
485 lines
16 KiB
Rust
use std::{
|
|
collections::{HashMap, VecDeque},
|
|
fs,
|
|
hash::Hash,
|
|
path::{Path, PathBuf},
|
|
sync::Mutex,
|
|
};
|
|
|
|
use serde::{Deserialize, Serialize};
|
|
|
|
/// Errors surfaced by [`Store`] implementations.
#[derive(thiserror::Error, Debug)]
pub enum StorageError {
    /// Filesystem failure (read/write/create-dir); carries the OS error text.
    #[error("io error: {0}")]
    Io(String),
    /// bincode (de)serialization failed; the underlying cause is
    /// intentionally dropped (callers map all decode errors here).
    #[error("serialization error")]
    Serde,
    /// Backend database failure (e.g. a SQLCipher-backed store), with the
    /// driver's message. Unused by `FileBackedStore`.
    #[error("database error: {0}")]
    Db(String),
}
|
|
|
|
// ── Store trait ──────────────────────────────────────────────────────────────
|
|
|
|
/// Abstraction over storage backends (file-backed, SQLCipher, etc.).
pub trait Store: Send + Sync {
    /// Add one key package to the pool published under `identity_key`.
    fn upload_key_package(
        &self,
        identity_key: &[u8],
        package: Vec<u8>,
    ) -> Result<(), StorageError>;

    /// Take one key package for `identity_key`, if any remain.
    /// Implementations consume the package (one-shot semantics).
    fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;

    /// Queue `payload` for delivery to `recipient_key` on `channel_id`.
    fn enqueue(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        payload: Vec<u8>,
    ) -> Result<(), StorageError>;

    /// Drain and return all queued messages for (recipient, channel),
    /// oldest first.
    fn fetch(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
    ) -> Result<Vec<Vec<u8>>, StorageError>;

    /// Fetch up to `limit` messages without draining the entire queue (Fix 8).
    fn fetch_limited(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        limit: usize,
    ) -> Result<Vec<Vec<u8>>, StorageError>;

    /// Return the number of queued messages for (recipient, channel) (Fix 7).
    fn queue_depth(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
    ) -> Result<usize, StorageError>;

    /// Delete messages older than `max_age_secs`. Returns count deleted (Fix 7).
    fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError>;

    /// Store (or replace) the hybrid public key published for `identity_key`.
    fn upload_hybrid_key(
        &self,
        identity_key: &[u8],
        hybrid_pk: Vec<u8>,
    ) -> Result<(), StorageError>;

    /// Retrieve the hybrid public key for `identity_key`, if one was uploaded.
    fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;

    /// Store the OPAQUE `ServerSetup` (generated once, loaded on restart).
    fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError>;

    /// Load the persisted `ServerSetup`, if any.
    fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError>;

    /// Store an OPAQUE user record (serialized `ServerRegistration`).
    fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError>;

    /// Retrieve an OPAQUE user record by username.
    fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;

    /// Check if a user record already exists (Fix 5).
    fn has_user_record(&self, username: &str) -> Result<bool, StorageError>;

    /// Store identity key for a user (Fix 2).
    fn store_user_identity_key(
        &self,
        username: &str,
        identity_key: Vec<u8>,
    ) -> Result<(), StorageError>;

    /// Retrieve identity key for a user (Fix 2).
    fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;

    /// Publish a P2P endpoint address for an identity key.
    fn publish_endpoint(
        &self,
        identity_key: &[u8],
        node_addr: Vec<u8>,
    ) -> Result<(), StorageError>;

    /// Resolve a peer's P2P endpoint address.
    fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
}
|
|
|
|
// ── ChannelKey ───────────────────────────────────────────────────────────────
|
|
|
|
#[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Debug)]
|
|
pub struct ChannelKey {
|
|
pub channel_id: Vec<u8>,
|
|
pub recipient_key: Vec<u8>,
|
|
}
|
|
|
|
impl Hash for ChannelKey {
|
|
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
|
|
self.channel_id.hash(state);
|
|
self.recipient_key.hash(state);
|
|
}
|
|
}
|
|
|
|
// ── FileBackedStore ──────────────────────────────────────────────────────────
|
|
|
|
/// Bincode envelope, v1: queues keyed by raw recipient/identity key only.
/// Used both for the key-package store and as the legacy (pre-channel)
/// delivery-queue format that `load_delivery_map` upgrades from.
#[derive(Serialize, Deserialize, Default)]
struct QueueMapV1 {
    map: HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
}
|
|
|
|
/// Bincode envelope, v2: delivery queues keyed by (channel, recipient).
#[derive(Serialize, Deserialize, Default)]
struct QueueMapV2 {
    map: HashMap<ChannelKey, VecDeque<Vec<u8>>>,
}
|
|
|
|
/// File-backed storage for KeyPackages and delivery queues.
///
/// Each mutation flushes the entire map to disk. Suitable for MVP-scale loads.
pub struct FileBackedStore {
    // On-disk location of each persisted map.
    kp_path: PathBuf,
    ds_path: PathBuf,
    hk_path: PathBuf,
    setup_path: PathBuf,
    users_path: PathBuf,
    identity_keys_path: PathBuf,
    // In-memory state, guarded per-map and mirrored to disk on mutation.
    // identity key -> FIFO pool of uploaded key packages.
    key_packages: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
    // (channel, recipient) -> FIFO queue of pending payloads.
    deliveries: Mutex<HashMap<ChannelKey, VecDeque<Vec<u8>>>>,
    // identity key -> hybrid public key bytes.
    hybrid_keys: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
    // username -> serialized OPAQUE user record.
    users: Mutex<HashMap<String, Vec<u8>>>,
    // username -> bound identity key (Fix 2).
    identity_keys: Mutex<HashMap<String, Vec<u8>>>,
    // identity key -> P2P node address. In-memory only: publish_endpoint
    // never flushes, so this map starts empty on every restart.
    endpoints: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
}
|
|
|
|
impl FileBackedStore {
|
|
pub fn open(dir: impl AsRef<Path>) -> Result<Self, StorageError> {
|
|
let dir = dir.as_ref();
|
|
if !dir.exists() {
|
|
fs::create_dir_all(dir).map_err(|e| StorageError::Io(e.to_string()))?;
|
|
}
|
|
let kp_path = dir.join("keypackages.bin");
|
|
let ds_path = dir.join("deliveries.bin");
|
|
let hk_path = dir.join("hybridkeys.bin");
|
|
let setup_path = dir.join("server_setup.bin");
|
|
let users_path = dir.join("users.bin");
|
|
let identity_keys_path = dir.join("identity_keys.bin");
|
|
|
|
let key_packages = Mutex::new(Self::load_kp_map(&kp_path)?);
|
|
let deliveries = Mutex::new(Self::load_delivery_map(&ds_path)?);
|
|
let hybrid_keys = Mutex::new(Self::load_hybrid_keys(&hk_path)?);
|
|
let users = Mutex::new(Self::load_users(&users_path)?);
|
|
let identity_keys = Mutex::new(Self::load_map_string_bytes(&identity_keys_path)?);
|
|
|
|
Ok(Self {
|
|
kp_path,
|
|
ds_path,
|
|
hk_path,
|
|
setup_path,
|
|
users_path,
|
|
identity_keys_path,
|
|
key_packages,
|
|
deliveries,
|
|
hybrid_keys,
|
|
users,
|
|
identity_keys,
|
|
endpoints: Mutex::new(HashMap::new()),
|
|
})
|
|
}
|
|
|
|
fn load_kp_map(path: &Path) -> Result<HashMap<Vec<u8>, VecDeque<Vec<u8>>>, StorageError> {
|
|
if !path.exists() {
|
|
return Ok(HashMap::new());
|
|
}
|
|
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
|
if bytes.is_empty() {
|
|
return Ok(HashMap::new());
|
|
}
|
|
let map: QueueMapV1 = bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)?;
|
|
Ok(map.map)
|
|
}
|
|
|
|
fn flush_kp_map(
|
|
&self,
|
|
path: &Path,
|
|
map: &HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
|
|
) -> Result<(), StorageError> {
|
|
let payload = QueueMapV1 { map: map.clone() };
|
|
let bytes = bincode::serialize(&payload).map_err(|_| StorageError::Serde)?;
|
|
if let Some(parent) = path.parent() {
|
|
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
|
}
|
|
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
|
}
|
|
|
|
fn load_delivery_map(path: &Path) -> Result<HashMap<ChannelKey, VecDeque<Vec<u8>>>, StorageError> {
|
|
if !path.exists() {
|
|
return Ok(HashMap::new());
|
|
}
|
|
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
|
if bytes.is_empty() {
|
|
return Ok(HashMap::new());
|
|
}
|
|
// Try v2 format (channel-aware). Fallback to legacy v1 for upgrade.
|
|
if let Ok(map) = bincode::deserialize::<QueueMapV2>(&bytes) {
|
|
return Ok(map.map);
|
|
}
|
|
let legacy: QueueMapV1 = bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)?;
|
|
let mut upgraded = HashMap::new();
|
|
for (recipient_key, queue) in legacy.map.into_iter() {
|
|
upgraded.insert(
|
|
ChannelKey {
|
|
channel_id: Vec::new(),
|
|
recipient_key,
|
|
},
|
|
queue,
|
|
);
|
|
}
|
|
Ok(upgraded)
|
|
}
|
|
|
|
fn flush_delivery_map(
|
|
&self,
|
|
path: &Path,
|
|
map: &HashMap<ChannelKey, VecDeque<Vec<u8>>>,
|
|
) -> Result<(), StorageError> {
|
|
let payload = QueueMapV2 { map: map.clone() };
|
|
let bytes = bincode::serialize(&payload).map_err(|_| StorageError::Serde)?;
|
|
if let Some(parent) = path.parent() {
|
|
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
|
}
|
|
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
|
}
|
|
|
|
fn load_hybrid_keys(path: &Path) -> Result<HashMap<Vec<u8>, Vec<u8>>, StorageError> {
|
|
if !path.exists() {
|
|
return Ok(HashMap::new());
|
|
}
|
|
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
|
if bytes.is_empty() {
|
|
return Ok(HashMap::new());
|
|
}
|
|
bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)
|
|
}
|
|
|
|
fn flush_hybrid_keys(
|
|
&self,
|
|
path: &Path,
|
|
map: &HashMap<Vec<u8>, Vec<u8>>,
|
|
) -> Result<(), StorageError> {
|
|
let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
|
|
if let Some(parent) = path.parent() {
|
|
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
|
}
|
|
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
|
}
|
|
|
|
fn load_users(path: &Path) -> Result<HashMap<String, Vec<u8>>, StorageError> {
|
|
if !path.exists() {
|
|
return Ok(HashMap::new());
|
|
}
|
|
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
|
if bytes.is_empty() {
|
|
return Ok(HashMap::new());
|
|
}
|
|
bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)
|
|
}
|
|
|
|
fn flush_users(
|
|
&self,
|
|
path: &Path,
|
|
map: &HashMap<String, Vec<u8>>,
|
|
) -> Result<(), StorageError> {
|
|
let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
|
|
if let Some(parent) = path.parent() {
|
|
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
|
}
|
|
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
|
}
|
|
|
|
fn load_map_string_bytes(path: &Path) -> Result<HashMap<String, Vec<u8>>, StorageError> {
|
|
Self::load_users(path)
|
|
}
|
|
|
|
fn flush_map_string_bytes(
|
|
&self,
|
|
path: &Path,
|
|
map: &HashMap<String, Vec<u8>>,
|
|
) -> Result<(), StorageError> {
|
|
self.flush_users(path, map)
|
|
}
|
|
}
|
|
|
|
impl Store for FileBackedStore {
|
|
fn upload_key_package(
|
|
&self,
|
|
identity_key: &[u8],
|
|
package: Vec<u8>,
|
|
) -> Result<(), StorageError> {
|
|
let mut map = self.key_packages.lock().unwrap();
|
|
map.entry(identity_key.to_vec())
|
|
.or_default()
|
|
.push_back(package);
|
|
self.flush_kp_map(&self.kp_path, &*map)
|
|
}
|
|
|
|
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
|
let mut map = self.key_packages.lock().unwrap();
|
|
let package = map.get_mut(identity_key).and_then(|q| q.pop_front());
|
|
self.flush_kp_map(&self.kp_path, &*map)?;
|
|
Ok(package)
|
|
}
|
|
|
|
fn enqueue(
|
|
&self,
|
|
recipient_key: &[u8],
|
|
channel_id: &[u8],
|
|
payload: Vec<u8>,
|
|
) -> Result<(), StorageError> {
|
|
let mut map = self.deliveries.lock().unwrap();
|
|
let key = ChannelKey {
|
|
channel_id: channel_id.to_vec(),
|
|
recipient_key: recipient_key.to_vec(),
|
|
};
|
|
map.entry(key)
|
|
.or_default()
|
|
.push_back(payload);
|
|
self.flush_delivery_map(&self.ds_path, &*map)
|
|
}
|
|
|
|
fn fetch(
|
|
&self,
|
|
recipient_key: &[u8],
|
|
channel_id: &[u8],
|
|
) -> Result<Vec<Vec<u8>>, StorageError> {
|
|
let mut map = self.deliveries.lock().unwrap();
|
|
let key = ChannelKey {
|
|
channel_id: channel_id.to_vec(),
|
|
recipient_key: recipient_key.to_vec(),
|
|
};
|
|
let messages = map
|
|
.get_mut(&key)
|
|
.map(|q| q.drain(..).collect())
|
|
.unwrap_or_default();
|
|
self.flush_delivery_map(&self.ds_path, &*map)?;
|
|
Ok(messages)
|
|
}
|
|
|
|
fn fetch_limited(
|
|
&self,
|
|
recipient_key: &[u8],
|
|
channel_id: &[u8],
|
|
limit: usize,
|
|
) -> Result<Vec<Vec<u8>>, StorageError> {
|
|
let mut map = self.deliveries.lock().unwrap();
|
|
let key = ChannelKey {
|
|
channel_id: channel_id.to_vec(),
|
|
recipient_key: recipient_key.to_vec(),
|
|
};
|
|
let messages = map
|
|
.get_mut(&key)
|
|
.map(|q| {
|
|
let count = limit.min(q.len());
|
|
q.drain(..count).collect()
|
|
})
|
|
.unwrap_or_default();
|
|
self.flush_delivery_map(&self.ds_path, &*map)?;
|
|
Ok(messages)
|
|
}
|
|
|
|
fn queue_depth(
|
|
&self,
|
|
recipient_key: &[u8],
|
|
channel_id: &[u8],
|
|
) -> Result<usize, StorageError> {
|
|
let map = self.deliveries.lock().unwrap();
|
|
let key = ChannelKey {
|
|
channel_id: channel_id.to_vec(),
|
|
recipient_key: recipient_key.to_vec(),
|
|
};
|
|
Ok(map.get(&key).map(|q| q.len()).unwrap_or(0))
|
|
}
|
|
|
|
fn gc_expired_messages(&self, _max_age_secs: u64) -> Result<usize, StorageError> {
|
|
// FileBackedStore does not track timestamps per message — no-op.
|
|
Ok(0)
|
|
}
|
|
|
|
fn upload_hybrid_key(
|
|
&self,
|
|
identity_key: &[u8],
|
|
hybrid_pk: Vec<u8>,
|
|
) -> Result<(), StorageError> {
|
|
let mut map = self.hybrid_keys.lock().unwrap();
|
|
map.insert(identity_key.to_vec(), hybrid_pk);
|
|
self.flush_hybrid_keys(&self.hk_path, &*map)
|
|
}
|
|
|
|
fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
|
let map = self.hybrid_keys.lock().unwrap();
|
|
Ok(map.get(identity_key).cloned())
|
|
}
|
|
|
|
fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError> {
|
|
if let Some(parent) = self.setup_path.parent() {
|
|
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
|
}
|
|
fs::write(&self.setup_path, setup).map_err(|e| StorageError::Io(e.to_string()))
|
|
}
|
|
|
|
fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError> {
|
|
if !self.setup_path.exists() {
|
|
return Ok(None);
|
|
}
|
|
let bytes = fs::read(&self.setup_path).map_err(|e| StorageError::Io(e.to_string()))?;
|
|
if bytes.is_empty() {
|
|
return Ok(None);
|
|
}
|
|
Ok(Some(bytes))
|
|
}
|
|
|
|
fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
|
|
let mut map = self.users.lock().unwrap();
|
|
map.insert(username.to_string(), record);
|
|
self.flush_users(&self.users_path, &*map)
|
|
}
|
|
|
|
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
|
let map = self.users.lock().unwrap();
|
|
Ok(map.get(username).cloned())
|
|
}
|
|
|
|
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
|
|
let map = self.users.lock().unwrap();
|
|
Ok(map.contains_key(username))
|
|
}
|
|
|
|
fn store_user_identity_key(
|
|
&self,
|
|
username: &str,
|
|
identity_key: Vec<u8>,
|
|
) -> Result<(), StorageError> {
|
|
let mut map = self.identity_keys.lock().unwrap();
|
|
map.insert(username.to_string(), identity_key);
|
|
self.flush_map_string_bytes(&self.identity_keys_path, &*map)
|
|
}
|
|
|
|
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
|
let map = self.identity_keys.lock().unwrap();
|
|
Ok(map.get(username).cloned())
|
|
}
|
|
|
|
fn publish_endpoint(
|
|
&self,
|
|
identity_key: &[u8],
|
|
node_addr: Vec<u8>,
|
|
) -> Result<(), StorageError> {
|
|
let mut map = self.endpoints.lock().unwrap();
|
|
map.insert(identity_key.to_vec(), node_addr);
|
|
Ok(())
|
|
}
|
|
|
|
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
|
let map = self.endpoints.lock().unwrap();
|
|
Ok(map.get(identity_key).cloned())
|
|
}
|
|
}
|