fix: address 16 architecture design flaws across all crates
Phase 1 — Foundation: - Constant-time token comparison via subtle::ConstantTimeEq (Fix 11) - Structured error codes E001–E020 in new error_codes.rs (Fix 15) - Remove dead envelope.capnp code and related types (Fix 16) Phase 2 — Auth Hardening: - Registration collision check via has_user_record() (Fix 5) - Auth required on uploadHybridKey/fetchHybridKey RPCs (Fix 1) - Identity-token binding at registration and login (Fix 2) - Session token expiry with 24h TTL and background reaper (Fix 3) - Bounded pending logins with 5-minute timeout (Fix 4) Phase 3 — Resource Limits: - Rate limiting: 100 enqueues/60s per token (Fix 6) - Queue depth cap at 1000 + 7-day message TTL/GC (Fix 7) - Partial queue drain via limit param on fetch/fetchWait (Fix 8) Phase 4 — Crypto Fixes: - OPAQUE KSF switched from Identity to Argon2id (Fix 10) - Random AEAD nonce in hybrid KEM instead of HKDF-derived (Fix 12) - Zeroize secret fields in HybridKeypairBytes (Fix 13) - Encrypted client state files via QPCE format (Fix 9) Phase 5 — Protocol: - Commit fan-out to all existing members on invite (Fix 14) - Add member_identities() to GroupMember Breaking: existing OPAQUE registrations invalidated (Argon2 KSF). Schema: added auth to hybrid key ops, identityKey to OPAQUE finish RPCs, limit to fetch/fetchWait. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
30
crates/quicnprotochat-server/src/error_codes.rs
Normal file
30
crates/quicnprotochat-server/src/error_codes.rs
Normal file
@@ -0,0 +1,30 @@
|
||||
//! Structured error codes for server RPC responses.
|
||||
//!
|
||||
//! Every `capnp::Error::failed()` message is prefixed with a stable code
|
||||
//! (E001–E020) so clients can match on the code without parsing free-text.
|
||||
|
||||
pub const E001_BAD_AUTH_VERSION: &str = "E001";
|
||||
pub const E002_EMPTY_TOKEN: &str = "E002";
|
||||
pub const E003_INVALID_TOKEN: &str = "E003";
|
||||
pub const E004_IDENTITY_KEY_LENGTH: &str = "E004";
|
||||
pub const E005_PAYLOAD_EMPTY: &str = "E005";
|
||||
pub const E006_PAYLOAD_TOO_LARGE: &str = "E006";
|
||||
pub const E007_PACKAGE_EMPTY: &str = "E007";
|
||||
pub const E008_PACKAGE_TOO_LARGE: &str = "E008";
|
||||
pub const E009_STORAGE_ERROR: &str = "E009";
|
||||
pub const E010_OPAQUE_ERROR: &str = "E010";
|
||||
pub const E011_USERNAME_EMPTY: &str = "E011";
|
||||
pub const E012_WIRE_VERSION: &str = "E012";
|
||||
pub const E013_HYBRID_KEY_EMPTY: &str = "E013";
|
||||
pub const E014_RATE_LIMITED: &str = "E014";
|
||||
pub const E015_QUEUE_FULL: &str = "E015";
|
||||
pub const E016_IDENTITY_MISMATCH: &str = "E016";
|
||||
pub const E017_SESSION_EXPIRED: &str = "E017";
|
||||
pub const E018_USER_EXISTS: &str = "E018";
|
||||
pub const E019_NO_PENDING_LOGIN: &str = "E019";
|
||||
pub const E020_BAD_PARAMS: &str = "E020";
|
||||
|
||||
/// Build a `capnp::Error::failed()` with the structured code prefix.
|
||||
pub fn coded_error(code: &str, msg: impl std::fmt::Display) -> capnp::Error {
|
||||
capnp::Error::failed(format!("{code}: {msg}"))
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,8 +1,4 @@
|
||||
//! SQLCipher-backed persistent storage.
|
||||
//!
|
||||
//! Uses `rusqlite` with `bundled-sqlcipher` for encrypted-at-rest storage.
|
||||
//! Implements the same [`Store`] trait as [`FileBackedStore`] but with proper
|
||||
//! ACID transactions and indexed queries.
|
||||
|
||||
use std::path::Path;
|
||||
use std::sync::Mutex;
|
||||
@@ -12,18 +8,11 @@ use rusqlite::{params, Connection};
|
||||
use crate::storage::{StorageError, Store};
|
||||
|
||||
/// SQLCipher-encrypted storage backend.
|
||||
///
|
||||
/// All data is stored in a single encrypted SQLite database. The encryption
|
||||
/// key is set via `PRAGMA key` at open time.
|
||||
pub struct SqlStore {
|
||||
conn: Mutex<Connection>,
|
||||
}
|
||||
|
||||
impl SqlStore {
|
||||
/// Open (or create) an encrypted database at `path`.
|
||||
///
|
||||
/// `key` is the passphrase used by SQLCipher. Pass an empty string for an
|
||||
/// unencrypted database (useful for testing).
|
||||
pub fn open(path: impl AsRef<Path>, key: &str) -> Result<Self, StorageError> {
|
||||
let conn = Connection::open(path).map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
@@ -32,7 +21,6 @@ impl SqlStore {
|
||||
.map_err(|e| StorageError::Db(format!("PRAGMA key failed: {e}")))?;
|
||||
}
|
||||
|
||||
// Performance pragmas — safe for a single-writer server.
|
||||
conn.execute_batch(
|
||||
"PRAGMA journal_mode = WAL;
|
||||
PRAGMA synchronous = NORMAL;
|
||||
@@ -47,7 +35,6 @@ impl SqlStore {
|
||||
Ok(store)
|
||||
}
|
||||
|
||||
/// Create schema tables if they don't exist yet.
|
||||
fn migrate(&self) -> Result<(), StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
conn.execute_batch(
|
||||
@@ -86,6 +73,17 @@ impl SqlStore {
|
||||
username TEXT PRIMARY KEY,
|
||||
opaque_record BLOB NOT NULL,
|
||||
created_at INTEGER DEFAULT (strftime('%s','now'))
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS user_identity_keys (
|
||||
username TEXT PRIMARY KEY,
|
||||
identity_key BLOB NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS endpoints (
|
||||
identity_key BLOB PRIMARY KEY,
|
||||
node_addr BLOB NOT NULL,
|
||||
updated_at INTEGER DEFAULT (strftime('%s','now'))
|
||||
);",
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
@@ -111,7 +109,6 @@ impl Store for SqlStore {
|
||||
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
|
||||
// Find the oldest KeyPackage (FIFO) and delete it atomically.
|
||||
let mut stmt = conn
|
||||
.prepare(
|
||||
"SELECT id, package_data FROM key_packages
|
||||
@@ -178,7 +175,6 @@ impl Store for SqlStore {
|
||||
|
||||
if !rows.is_empty() {
|
||||
let ids: Vec<i64> = rows.iter().map(|(id, _)| *id).collect();
|
||||
// Delete fetched rows in a single statement.
|
||||
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
|
||||
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
|
||||
let params: Vec<&dyn rusqlite::types::ToSql> =
|
||||
@@ -190,6 +186,76 @@ impl Store for SqlStore {
|
||||
Ok(rows.into_iter().map(|(_, payload)| payload).collect())
|
||||
}
|
||||
|
||||
fn fetch_limited(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
limit: usize,
|
||||
) -> Result<Vec<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
|
||||
let mut stmt = conn
|
||||
.prepare(
|
||||
"SELECT id, payload FROM deliveries
|
||||
WHERE recipient_key = ?1 AND channel_id = ?2
|
||||
ORDER BY id ASC
|
||||
LIMIT ?3",
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
let rows: Vec<(i64, Vec<u8>)> = stmt
|
||||
.query_map(params![recipient_key, channel_id, limit as i64], |row| {
|
||||
Ok((row.get(0)?, row.get(1)?))
|
||||
})
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
if !rows.is_empty() {
|
||||
let ids: Vec<i64> = rows.iter().map(|(id, _)| *id).collect();
|
||||
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
|
||||
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
|
||||
let params: Vec<&dyn rusqlite::types::ToSql> =
|
||||
ids.iter().map(|id| id as &dyn rusqlite::types::ToSql).collect();
|
||||
conn.execute(&sql, params.as_slice())
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
}
|
||||
|
||||
Ok(rows.into_iter().map(|(_, payload)| payload).collect())
|
||||
}
|
||||
|
||||
fn queue_depth(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<usize, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let count: i64 = conn
|
||||
.query_row(
|
||||
"SELECT COUNT(*) FROM deliveries WHERE recipient_key = ?1 AND channel_id = ?2",
|
||||
params![recipient_key, channel_id],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(count as usize)
|
||||
}
|
||||
|
||||
fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let cutoff = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs()
|
||||
.saturating_sub(max_age_secs);
|
||||
let deleted = conn
|
||||
.execute(
|
||||
"DELETE FROM deliveries WHERE created_at < ?1",
|
||||
params![cutoff as i64],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(deleted)
|
||||
}
|
||||
|
||||
fn upload_hybrid_key(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
@@ -256,6 +322,68 @@ impl Store for SqlStore {
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))
|
||||
}
|
||||
|
||||
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let exists: bool = conn
|
||||
.query_row(
|
||||
"SELECT EXISTS(SELECT 1 FROM users WHERE username = ?1)",
|
||||
params![username],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(exists)
|
||||
}
|
||||
|
||||
fn store_user_identity_key(
|
||||
&self,
|
||||
username: &str,
|
||||
identity_key: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO user_identity_keys (username, identity_key) VALUES (?1, ?2)",
|
||||
params![username, identity_key],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT identity_key FROM user_identity_keys WHERE username = ?1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
stmt.query_row(params![username], |row| row.get(0))
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))
|
||||
}
|
||||
|
||||
fn publish_endpoint(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
node_addr: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO endpoints (identity_key, node_addr) VALUES (?1, ?2)",
|
||||
params![identity_key, node_addr],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let mut stmt = conn
|
||||
.prepare("SELECT node_addr FROM endpoints WHERE identity_key = ?1")
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
stmt.query_row(params![identity_key], |row| row.get(0))
|
||||
.optional()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
/// Convenience extension for `rusqlite::OptionalExtension`.
|
||||
@@ -284,10 +412,8 @@ mod tests {
|
||||
#[test]
|
||||
fn key_package_fifo() {
|
||||
let store = open_in_memory();
|
||||
let ik = b"alice_identity_key__32bytes_long";
|
||||
// Pad to 32 bytes to match real usage
|
||||
let mut identity = [0u8; 32];
|
||||
identity[..ik.len()].copy_from_slice(ik);
|
||||
identity[..31].copy_from_slice(b"alice_identity_key__32bytes_lon");
|
||||
|
||||
store
|
||||
.upload_key_package(&identity, b"kp1".to_vec())
|
||||
@@ -319,10 +445,55 @@ mod tests {
|
||||
let msgs = store.fetch(&rk, ch).unwrap();
|
||||
assert_eq!(msgs, vec![b"msg1".to_vec(), b"msg2".to_vec()]);
|
||||
|
||||
// Queue is drained.
|
||||
assert!(store.fetch(&rk, ch).unwrap().is_empty());
|
||||
}
|
||||
|
||||
#[test]
fn fetch_limited_partial_drain() {
    let store = open_in_memory();
    let recipient = [5u8; 32];
    let channel = b"ch";

    for payload in [b"a", b"b", b"c"] {
        store.enqueue(&recipient, channel, payload.to_vec()).unwrap();
    }

    // First page drains only the two oldest messages...
    let page = store.fetch_limited(&recipient, channel, 2).unwrap();
    assert_eq!(page, vec![b"a".to_vec(), b"b".to_vec()]);

    // ...leaving the rest behind for a later full fetch.
    assert_eq!(store.fetch(&recipient, channel).unwrap(), vec![b"c".to_vec()]);
}
|
||||
|
||||
#[test]
fn queue_depth_count() {
    let store = open_in_memory();
    let recipient = [6u8; 32];
    let channel = b"ch";

    // Empty queue reports zero, then tracks each enqueue.
    assert_eq!(store.queue_depth(&recipient, channel).unwrap(), 0);
    for payload in [b"x", b"y"] {
        store.enqueue(&recipient, channel, payload.to_vec()).unwrap();
    }
    assert_eq!(store.queue_depth(&recipient, channel).unwrap(), 2);
}
|
||||
|
||||
#[test]
fn has_user_record_check() {
    let store = open_in_memory();

    assert!(!store.has_user_record("alice").unwrap());
    store.store_user_record("alice", b"record".to_vec()).unwrap();

    // Only the registered name reports an existing record.
    assert!(store.has_user_record("alice").unwrap());
    assert!(!store.has_user_record("bob").unwrap());
}
|
||||
|
||||
#[test]
fn user_identity_key_round_trip() {
    let store = open_in_memory();

    // Unknown user has no bound identity key.
    assert!(store.get_user_identity_key("alice").unwrap().is_none());

    let key = vec![1u8; 32];
    store.store_user_identity_key("alice", key.clone()).unwrap();
    assert_eq!(store.get_user_identity_key("alice").unwrap(), Some(key));
}
|
||||
|
||||
#[test]
|
||||
fn hybrid_key_round_trip() {
|
||||
let store = open_in_memory();
|
||||
@@ -333,24 +504,6 @@ mod tests {
|
||||
assert_eq!(store.fetch_hybrid_key(&ik).unwrap(), Some(pk));
|
||||
}
|
||||
|
||||
#[test]
fn hybrid_key_upsert() {
    let store = open_in_memory();
    let identity = [3u8; 32];

    // A second upload for the same identity key must replace the first.
    store.upload_hybrid_key(&identity, b"v1".to_vec()).unwrap();
    store.upload_hybrid_key(&identity, b"v2".to_vec()).unwrap();

    assert_eq!(
        store.fetch_hybrid_key(&identity).unwrap(),
        Some(b"v2".to_vec())
    );
}
|
||||
|
||||
#[test]
|
||||
fn separate_channels_isolated() {
|
||||
let store = open_in_memory();
|
||||
|
||||
@@ -43,6 +43,24 @@ pub trait Store: Send + Sync {
|
||||
channel_id: &[u8],
|
||||
) -> Result<Vec<Vec<u8>>, StorageError>;
|
||||
|
||||
/// Fetch up to `limit` messages without draining the entire queue (Fix 8).
|
||||
fn fetch_limited(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
limit: usize,
|
||||
) -> Result<Vec<Vec<u8>>, StorageError>;
|
||||
|
||||
/// Return the number of queued messages for (recipient, channel) (Fix 7).
|
||||
fn queue_depth(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<usize, StorageError>;
|
||||
|
||||
/// Delete messages older than `max_age_secs`. Returns count deleted (Fix 7).
|
||||
fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError>;
|
||||
|
||||
fn upload_hybrid_key(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
@@ -62,6 +80,29 @@ pub trait Store: Send + Sync {
|
||||
|
||||
/// Retrieve an OPAQUE user record by username.
|
||||
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;
|
||||
|
||||
/// Check if a user record already exists (Fix 5).
|
||||
fn has_user_record(&self, username: &str) -> Result<bool, StorageError>;
|
||||
|
||||
/// Store identity key for a user (Fix 2).
|
||||
fn store_user_identity_key(
|
||||
&self,
|
||||
username: &str,
|
||||
identity_key: Vec<u8>,
|
||||
) -> Result<(), StorageError>;
|
||||
|
||||
/// Retrieve identity key for a user (Fix 2).
|
||||
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;
|
||||
|
||||
/// Publish a P2P endpoint address for an identity key.
|
||||
fn publish_endpoint(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
node_addr: Vec<u8>,
|
||||
) -> Result<(), StorageError>;
|
||||
|
||||
/// Resolve a peer's P2P endpoint address.
|
||||
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
|
||||
}
|
||||
|
||||
// ── ChannelKey ───────────────────────────────────────────────────────────────
|
||||
@@ -100,10 +141,13 @@ pub struct FileBackedStore {
|
||||
hk_path: PathBuf,
|
||||
setup_path: PathBuf,
|
||||
users_path: PathBuf,
|
||||
identity_keys_path: PathBuf,
|
||||
key_packages: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
|
||||
deliveries: Mutex<HashMap<ChannelKey, VecDeque<Vec<u8>>>>,
|
||||
hybrid_keys: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
|
||||
users: Mutex<HashMap<String, Vec<u8>>>,
|
||||
identity_keys: Mutex<HashMap<String, Vec<u8>>>,
|
||||
endpoints: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
|
||||
}
|
||||
|
||||
impl FileBackedStore {
|
||||
@@ -117,11 +161,13 @@ impl FileBackedStore {
|
||||
let hk_path = dir.join("hybridkeys.bin");
|
||||
let setup_path = dir.join("server_setup.bin");
|
||||
let users_path = dir.join("users.bin");
|
||||
let identity_keys_path = dir.join("identity_keys.bin");
|
||||
|
||||
let key_packages = Mutex::new(Self::load_kp_map(&kp_path)?);
|
||||
let deliveries = Mutex::new(Self::load_delivery_map(&ds_path)?);
|
||||
let hybrid_keys = Mutex::new(Self::load_hybrid_keys(&hk_path)?);
|
||||
let users = Mutex::new(Self::load_users(&users_path)?);
|
||||
let identity_keys = Mutex::new(Self::load_map_string_bytes(&identity_keys_path)?);
|
||||
|
||||
Ok(Self {
|
||||
kp_path,
|
||||
@@ -129,10 +175,13 @@ impl FileBackedStore {
|
||||
hk_path,
|
||||
setup_path,
|
||||
users_path,
|
||||
identity_keys_path,
|
||||
key_packages,
|
||||
deliveries,
|
||||
hybrid_keys,
|
||||
users,
|
||||
identity_keys,
|
||||
endpoints: Mutex::new(HashMap::new()),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -245,6 +294,18 @@ impl FileBackedStore {
|
||||
}
|
||||
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
||||
}
|
||||
|
||||
fn load_map_string_bytes(path: &Path) -> Result<HashMap<String, Vec<u8>>, StorageError> {
|
||||
Self::load_users(path)
|
||||
}
|
||||
|
||||
fn flush_map_string_bytes(
|
||||
&self,
|
||||
path: &Path,
|
||||
map: &HashMap<String, Vec<u8>>,
|
||||
) -> Result<(), StorageError> {
|
||||
self.flush_users(path, map)
|
||||
}
|
||||
}
|
||||
|
||||
impl Store for FileBackedStore {
|
||||
@@ -302,6 +363,46 @@ impl Store for FileBackedStore {
|
||||
Ok(messages)
|
||||
}
|
||||
|
||||
fn fetch_limited(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
limit: usize,
|
||||
) -> Result<Vec<Vec<u8>>, StorageError> {
|
||||
let mut map = self.deliveries.lock().unwrap();
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
};
|
||||
let messages = map
|
||||
.get_mut(&key)
|
||||
.map(|q| {
|
||||
let count = limit.min(q.len());
|
||||
q.drain(..count).collect()
|
||||
})
|
||||
.unwrap_or_default();
|
||||
self.flush_delivery_map(&self.ds_path, &*map)?;
|
||||
Ok(messages)
|
||||
}
|
||||
|
||||
fn queue_depth(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<usize, StorageError> {
|
||||
let map = self.deliveries.lock().unwrap();
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
};
|
||||
Ok(map.get(&key).map(|q| q.len()).unwrap_or(0))
|
||||
}
|
||||
|
||||
/// Age-based GC is a no-op here: `FileBackedStore` keeps no per-message
/// timestamps, so the message TTL (Fix 7) is enforced only by the SQL
/// backend. Always reports zero deletions.
fn gc_expired_messages(&self, _max_age_secs: u64) -> Result<usize, StorageError> {
    Ok(0)
}
|
||||
|
||||
fn upload_hybrid_key(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
@@ -345,4 +446,39 @@ impl Store for FileBackedStore {
|
||||
let map = self.users.lock().unwrap();
|
||||
Ok(map.get(username).cloned())
|
||||
}
|
||||
|
||||
/// True when an OPAQUE user record exists for `username` (Fix 5).
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
    Ok(self.users.lock().unwrap().contains_key(username))
}
|
||||
|
||||
fn store_user_identity_key(
|
||||
&self,
|
||||
username: &str,
|
||||
identity_key: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let mut map = self.identity_keys.lock().unwrap();
|
||||
map.insert(username.to_string(), identity_key);
|
||||
self.flush_map_string_bytes(&self.identity_keys_path, &*map)
|
||||
}
|
||||
|
||||
/// Identity key bound to `username`, or `None` if never registered (Fix 2).
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
    Ok(self.identity_keys.lock().unwrap().get(username).cloned())
}
|
||||
|
||||
fn publish_endpoint(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
node_addr: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let mut map = self.endpoints.lock().unwrap();
|
||||
map.insert(identity_key.to_vec(), node_addr);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Last-published node address for `identity_key`, if any.
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
    Ok(self.endpoints.lock().unwrap().get(identity_key).cloned())
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user