chore: rename project quicnprotochat -> quicproquo (binaries: qpq)
Rename the entire workspace:
- Crate packages: quicnprotochat-{core,proto,server,client,gui,p2p,mobile} -> quicproquo-*
- Binary names: quicnprotochat -> qpq, quicnprotochat-server -> qpq-server,
quicnprotochat-gui -> qpq-gui
- Default files: *-state.bin -> qpq-state.bin, *-server.toml -> qpq-server.toml,
*.db -> qpq.db
- Environment variable prefix: QUICNPROTOCHAT_* -> QPQ_*
- App identifier: chat.quicnproto.gui -> chat.quicproquo.gui
- Proto package: quicnprotochat.bench -> quicproquo.bench
- All documentation, Docker, CI, and script references updated
HKDF domain-separation strings and P2P ALPN remain unchanged for
backward compatibility with existing encrypted state and wire protocol.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
823
crates/quicproquo-server/src/storage.rs
Normal file
823
crates/quicproquo-server/src/storage.rs
Normal file
@@ -0,0 +1,823 @@
|
||||
use std::{
|
||||
collections::{HashMap, VecDeque},
|
||||
fs,
|
||||
hash::Hash,
|
||||
path::{Path, PathBuf},
|
||||
sync::Mutex,
|
||||
};
|
||||
|
||||
use rand::RngCore;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Errors surfaced by any [`Store`] backend.
///
/// I/O and database errors carry a human-readable description rather than the
/// underlying error type so the enum stays backend-agnostic.
#[derive(thiserror::Error, Debug)]
pub enum StorageError {
    /// Filesystem failure (read/write/create-dir), or a poisoned lock.
    #[error("io error: {0}")]
    Io(String),
    /// bincode (de)serialization failure; deliberately carries no detail.
    #[error("serialization error")]
    Serde,
    /// Backend database failure (used by non-file backends, e.g. SQLCipher).
    #[error("database error: {0}")]
    Db(String),
    /// Unique constraint violation (e.g. user already exists).
    #[error("duplicate user: {0}")]
    DuplicateUser(String),
}
|
||||
|
||||
fn lock<T>(m: &Mutex<T>) -> Result<std::sync::MutexGuard<'_, T>, StorageError> {
|
||||
m.lock()
|
||||
.map_err(|e| StorageError::Io(format!("lock poisoned: {e}")))
|
||||
}
|
||||
|
||||
// ── Store trait ──────────────────────────────────────────────────────────────
|
||||
|
||||
/// Abstraction over storage backends (file-backed, SQLCipher, etc.).
///
/// Implementations must be internally synchronized: the trait requires
/// `Send + Sync` so a single store instance can be shared across handlers.
pub trait Store: Send + Sync {
    /// Append a serialized KeyPackage to the queue for `identity_key`.
    fn upload_key_package(&self, identity_key: &[u8], package: Vec<u8>)
        -> Result<(), StorageError>;

    /// Pop one queued KeyPackage for `identity_key`, or `None` if the queue is empty.
    fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;

    /// Enqueue a payload and return the monotonically increasing per-inbox sequence number
    /// assigned to this message. Clients sort by seq before MLS processing.
    fn enqueue(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        payload: Vec<u8>,
    ) -> Result<u64, StorageError>;

    /// Fetch and drain all queued messages, returning `(seq, payload)` pairs ordered by seq.
    fn fetch(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
    ) -> Result<Vec<(u64, Vec<u8>)>, StorageError>;

    /// Fetch up to `limit` messages without draining the entire queue (Fix 8).
    /// Returns `(seq, payload)` pairs ordered by seq.
    fn fetch_limited(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        limit: usize,
    ) -> Result<Vec<(u64, Vec<u8>)>, StorageError>;

    /// Return the number of queued messages for (recipient, channel) (Fix 7).
    fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError>;

    /// Delete messages older than `max_age_secs`. Returns count deleted (Fix 7).
    /// Backends without per-message timestamps may implement this as a no-op.
    fn gc_expired_messages(&self, max_age_secs: u64) -> Result<usize, StorageError>;

    /// Store the hybrid public key advertised by `identity_key` (overwrites any previous).
    fn upload_hybrid_key(
        &self,
        identity_key: &[u8],
        hybrid_pk: Vec<u8>,
    ) -> Result<(), StorageError>;

    /// Look up the hybrid public key for `identity_key`, if one was uploaded.
    fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;

    /// Store the OPAQUE `ServerSetup` (generated once, loaded on restart).
    fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError>;

    /// Load the persisted `ServerSetup`, if any.
    fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError>;

    /// Store an OPAQUE user record (serialized `ServerRegistration`).
    /// Fails with [`StorageError::DuplicateUser`] if the username is taken.
    fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError>;

    /// Retrieve an OPAQUE user record by username.
    fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;

    /// Check if a user record already exists (Fix 5).
    fn has_user_record(&self, username: &str) -> Result<bool, StorageError>;

    /// Store identity key for a user (Fix 2).
    fn store_user_identity_key(
        &self,
        username: &str,
        identity_key: Vec<u8>,
    ) -> Result<(), StorageError>;

    /// Retrieve identity key for a user (Fix 2).
    fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError>;

    /// Reverse lookup: resolve an identity key to the registered username.
    fn resolve_identity_key(&self, identity_key: &[u8]) -> Result<Option<String>, StorageError>;

    /// Peek at queued messages without removing them (non-destructive).
    /// Returns `(seq, payload)` pairs ordered by seq.
    fn peek(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        limit: usize,
    ) -> Result<Vec<(u64, Vec<u8>)>, StorageError>;

    /// Acknowledge (remove) all messages with seq <= seq_up_to.
    /// Returns the number of messages removed.
    fn ack(
        &self,
        recipient_key: &[u8],
        channel_id: &[u8],
        seq_up_to: u64,
    ) -> Result<usize, StorageError>;

    /// Publish a P2P endpoint address for an identity key.
    fn publish_endpoint(&self, identity_key: &[u8], node_addr: Vec<u8>)
        -> Result<(), StorageError>;

    /// Resolve a peer's P2P endpoint address.
    fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;

    /// Create a 1:1 channel between two members. Returns a 16-byte (UUID-sized,
    /// randomly generated) channel_id.
    /// Members are stored in sorted order for deterministic lookup.
    fn create_channel(&self, member_a: &[u8], member_b: &[u8]) -> Result<Vec<u8>, StorageError>;

    /// Get the two members of a channel by channel_id (16 bytes). Returns (member_a, member_b) in sorted order.
    fn get_channel_members(&self, channel_id: &[u8]) -> Result<Option<(Vec<u8>, Vec<u8>)>, StorageError>;

    // ── Federation ──────────────────────────────────────────────────────────

    /// Store the home server domain for an identity key.
    fn store_identity_home_server(
        &self,
        identity_key: &[u8],
        home_server: &str,
    ) -> Result<(), StorageError>;

    /// Get the home server domain for an identity key.
    fn get_identity_home_server(
        &self,
        identity_key: &[u8],
    ) -> Result<Option<String>, StorageError>;

    /// Insert or update a federation peer.
    fn upsert_federation_peer(
        &self,
        domain: &str,
        is_active: bool,
    ) -> Result<(), StorageError>;

    /// List all active federation peers.
    fn list_federation_peers(&self) -> Result<Vec<(String, bool)>, StorageError>;
}
|
||||
|
||||
// ── ChannelKey ───────────────────────────────────────────────────────────────
|
||||
|
||||
/// Composite key identifying one recipient's delivery queue within one channel.
///
/// Used as the `HashMap` key in the V2/V3 delivery maps; `Hash` is implemented
/// manually below and must stay consistent with the derived `Eq`.
#[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Debug)]
pub struct ChannelKey {
    // Channel identifier (16 bytes for server-created channels; may be empty).
    pub channel_id: Vec<u8>,
    // Recipient identity key bytes.
    pub recipient_key: Vec<u8>,
}
|
||||
|
||||
impl Hash for ChannelKey {
|
||||
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
|
||||
self.channel_id.hash(state);
|
||||
self.recipient_key.hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
// ── FileBackedStore ──────────────────────────────────────────────────────────
|
||||
|
||||
/// V1 on-disk format: queues keyed by a single byte key.
/// Still used as the serialization wrapper for the KeyPackage map.
#[derive(Serialize, Deserialize, Default)]
struct QueueMapV1 {
    map: HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
}
|
||||
|
||||
/// V2 on-disk delivery format: keyed by `(channel_id, recipient_key)` but with
/// no per-message sequence numbers. Retained read-only so
/// `load_delivery_map_v3` can migrate existing files.
#[derive(Serialize, Deserialize, Default)]
struct QueueMapV2 {
    map: HashMap<ChannelKey, VecDeque<Vec<u8>>>,
}
|
||||
|
||||
/// One queued delivery: the opaque payload plus its per-inbox sequence number.
#[derive(Serialize, Deserialize, Default, Clone)]
struct SeqEntry {
    // Monotonic per-(channel, recipient) sequence assigned at enqueue time.
    seq: u64,
    // Opaque message payload.
    data: Vec<u8>,
}
|
||||
|
||||
/// V3 delivery store: each queue entry carries a monotonic per-inbox sequence number.
#[derive(Serialize, Deserialize, Default)]
struct QueueMapV3 {
    // Pending deliveries per (channel, recipient) inbox, in enqueue order.
    map: HashMap<ChannelKey, VecDeque<SeqEntry>>,
    // Next sequence number to assign per inbox; kept separate from the queue
    // so sequence numbers keep increasing across drains.
    next_seq: HashMap<ChannelKey, u64>,
}
|
||||
|
||||
/// File-backed storage for KeyPackages and delivery queues.
///
/// Each mutation flushes the entire map to disk. Suitable for MVP-scale loads.
pub struct FileBackedStore {
    // On-disk locations for each persisted map.
    kp_path: PathBuf,
    ds_path: PathBuf,
    hk_path: PathBuf,
    setup_path: PathBuf,
    users_path: PathBuf,
    identity_keys_path: PathBuf,
    channels_path: PathBuf,
    // In-memory state, each map behind its own Mutex so unrelated
    // operations do not contend on one lock.
    key_packages: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
    deliveries: Mutex<QueueMapV3>,
    channels: Mutex<HashMap<Vec<u8>, (Vec<u8>, Vec<u8>)>>,
    hybrid_keys: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
    users: Mutex<HashMap<String, Vec<u8>>>,
    identity_keys: Mutex<HashMap<String, Vec<u8>>>,
    // Not persisted: publish_endpoint never writes this map to disk, so
    // endpoint records do not survive a restart.
    endpoints: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
}
|
||||
|
||||
impl FileBackedStore {
|
||||
pub fn open(dir: impl AsRef<Path>) -> Result<Self, StorageError> {
|
||||
let dir = dir.as_ref();
|
||||
if !dir.exists() {
|
||||
fs::create_dir_all(dir).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
let kp_path = dir.join("keypackages.bin");
|
||||
let ds_path = dir.join("deliveries.bin");
|
||||
let hk_path = dir.join("hybridkeys.bin");
|
||||
let setup_path = dir.join("server_setup.bin");
|
||||
let users_path = dir.join("users.bin");
|
||||
let identity_keys_path = dir.join("identity_keys.bin");
|
||||
let channels_path = dir.join("channels.bin");
|
||||
|
||||
let key_packages = Mutex::new(Self::load_kp_map(&kp_path)?);
|
||||
let deliveries = Mutex::new(Self::load_delivery_map_v3(&ds_path)?);
|
||||
let hybrid_keys = Mutex::new(Self::load_hybrid_keys(&hk_path)?);
|
||||
let users = Mutex::new(Self::load_users(&users_path)?);
|
||||
let identity_keys = Mutex::new(Self::load_map_string_bytes(&identity_keys_path)?);
|
||||
let channels = Mutex::new(Self::load_channels(&channels_path)?);
|
||||
|
||||
Ok(Self {
|
||||
kp_path,
|
||||
ds_path,
|
||||
hk_path,
|
||||
setup_path,
|
||||
users_path,
|
||||
identity_keys_path,
|
||||
channels_path,
|
||||
key_packages,
|
||||
deliveries,
|
||||
channels,
|
||||
hybrid_keys,
|
||||
users,
|
||||
identity_keys,
|
||||
endpoints: Mutex::new(HashMap::new()),
|
||||
})
|
||||
}
|
||||
|
||||
fn load_channels(
|
||||
path: &Path,
|
||||
) -> Result<HashMap<Vec<u8>, (Vec<u8>, Vec<u8>)>, StorageError> {
|
||||
if !path.exists() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)
|
||||
}
|
||||
|
||||
fn flush_channels(
|
||||
&self,
|
||||
path: &Path,
|
||||
map: &HashMap<Vec<u8>, (Vec<u8>, Vec<u8>)>,
|
||||
) -> Result<(), StorageError> {
|
||||
let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
||||
}
|
||||
|
||||
fn load_kp_map(path: &Path) -> Result<HashMap<Vec<u8>, VecDeque<Vec<u8>>>, StorageError> {
|
||||
if !path.exists() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
let map: QueueMapV1 = bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)?;
|
||||
Ok(map.map)
|
||||
}
|
||||
|
||||
fn flush_kp_map(
|
||||
&self,
|
||||
path: &Path,
|
||||
map: &HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
|
||||
) -> Result<(), StorageError> {
|
||||
let payload = QueueMapV1 { map: map.clone() };
|
||||
let bytes = bincode::serialize(&payload).map_err(|_| StorageError::Serde)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
||||
}
|
||||
|
||||
/// Load deliveries as V3. Falls back to V2 format (assigns seqs starting at 0).
|
||||
fn load_delivery_map_v3(path: &Path) -> Result<QueueMapV3, StorageError> {
|
||||
if !path.exists() {
|
||||
return Ok(QueueMapV3::default());
|
||||
}
|
||||
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(QueueMapV3::default());
|
||||
}
|
||||
// Try V3 first.
|
||||
if let Ok(v3) = bincode::deserialize::<QueueMapV3>(&bytes) {
|
||||
return Ok(v3);
|
||||
}
|
||||
// Fall back to V2: assign ascending seqs starting at 0 per channel.
|
||||
let v2 = bincode::deserialize::<QueueMapV2>(&bytes)
|
||||
.map_err(|_| StorageError::Io("deliveries file: unrecognised format".into()))?;
|
||||
let mut v3 = QueueMapV3::default();
|
||||
for (key, queue) in v2.map {
|
||||
let entries: VecDeque<SeqEntry> = queue
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.map(|(i, data)| SeqEntry { seq: i as u64, data })
|
||||
.collect();
|
||||
let next = entries.len() as u64;
|
||||
v3.next_seq.insert(key.clone(), next);
|
||||
v3.map.insert(key, entries);
|
||||
}
|
||||
Ok(v3)
|
||||
}
|
||||
|
||||
fn flush_delivery_map(&self, path: &Path, map: &QueueMapV3) -> Result<(), StorageError> {
|
||||
let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
||||
}
|
||||
|
||||
fn load_hybrid_keys(path: &Path) -> Result<HashMap<Vec<u8>, Vec<u8>>, StorageError> {
|
||||
if !path.exists() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)
|
||||
}
|
||||
|
||||
fn flush_hybrid_keys(
|
||||
&self,
|
||||
path: &Path,
|
||||
map: &HashMap<Vec<u8>, Vec<u8>>,
|
||||
) -> Result<(), StorageError> {
|
||||
let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
||||
}
|
||||
|
||||
fn load_users(path: &Path) -> Result<HashMap<String, Vec<u8>>, StorageError> {
|
||||
if !path.exists() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)
|
||||
}
|
||||
|
||||
fn flush_users(&self, path: &Path, map: &HashMap<String, Vec<u8>>) -> Result<(), StorageError> {
|
||||
let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
||||
}
|
||||
|
||||
fn load_map_string_bytes(path: &Path) -> Result<HashMap<String, Vec<u8>>, StorageError> {
|
||||
Self::load_users(path)
|
||||
}
|
||||
|
||||
fn flush_map_string_bytes(
|
||||
&self,
|
||||
path: &Path,
|
||||
map: &HashMap<String, Vec<u8>>,
|
||||
) -> Result<(), StorageError> {
|
||||
self.flush_users(path, map)
|
||||
}
|
||||
}
|
||||
|
||||
impl Store for FileBackedStore {
|
||||
fn upload_key_package(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
package: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let mut map = lock(&self.key_packages)?;
|
||||
map.entry(identity_key.to_vec())
|
||||
.or_default()
|
||||
.push_back(package);
|
||||
self.flush_kp_map(&self.kp_path, &*map)
|
||||
}
|
||||
|
||||
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let mut map = lock(&self.key_packages)?;
|
||||
let package = map.get_mut(identity_key).and_then(|q| q.pop_front());
|
||||
self.flush_kp_map(&self.kp_path, &*map)?;
|
||||
Ok(package)
|
||||
}
|
||||
|
||||
fn enqueue(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
payload: Vec<u8>,
|
||||
) -> Result<u64, StorageError> {
|
||||
let mut inner = lock(&self.deliveries)?;
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
};
|
||||
let entry = inner.next_seq.entry(key.clone()).or_insert(0);
|
||||
let seq = *entry;
|
||||
*entry = seq + 1;
|
||||
inner.map.entry(key).or_default().push_back(SeqEntry { seq, data: payload });
|
||||
self.flush_delivery_map(&self.ds_path, &*inner)?;
|
||||
Ok(seq)
|
||||
}
|
||||
|
||||
fn fetch(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
|
||||
let mut inner = lock(&self.deliveries)?;
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
};
|
||||
let messages: Vec<(u64, Vec<u8>)> = inner
|
||||
.map
|
||||
.get_mut(&key)
|
||||
.map(|q| q.drain(..).map(|e| (e.seq, e.data)).collect())
|
||||
.unwrap_or_default();
|
||||
self.flush_delivery_map(&self.ds_path, &*inner)?;
|
||||
Ok(messages)
|
||||
}
|
||||
|
||||
fn fetch_limited(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
limit: usize,
|
||||
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
|
||||
let mut inner = lock(&self.deliveries)?;
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
};
|
||||
let messages: Vec<(u64, Vec<u8>)> = inner
|
||||
.map
|
||||
.get_mut(&key)
|
||||
.map(|q| {
|
||||
let count = limit.min(q.len());
|
||||
q.drain(..count).map(|e| (e.seq, e.data)).collect()
|
||||
})
|
||||
.unwrap_or_default();
|
||||
self.flush_delivery_map(&self.ds_path, &*inner)?;
|
||||
Ok(messages)
|
||||
}
|
||||
|
||||
fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError> {
|
||||
let inner = lock(&self.deliveries)?;
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
};
|
||||
Ok(inner.map.get(&key).map(|q| q.len()).unwrap_or(0))
|
||||
}
|
||||
|
||||
fn gc_expired_messages(&self, _max_age_secs: u64) -> Result<usize, StorageError> {
|
||||
// FileBackedStore does not track timestamps per message — no-op.
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
fn upload_hybrid_key(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
hybrid_pk: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let mut map = lock(&self.hybrid_keys)?;
|
||||
map.insert(identity_key.to_vec(), hybrid_pk);
|
||||
self.flush_hybrid_keys(&self.hk_path, &*map)
|
||||
}
|
||||
|
||||
fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let map = lock(&self.hybrid_keys)?;
|
||||
Ok(map.get(identity_key).cloned())
|
||||
}
|
||||
|
||||
fn store_server_setup(&self, setup: Vec<u8>) -> Result<(), StorageError> {
|
||||
if let Some(parent) = self.setup_path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
fs::write(&self.setup_path, setup).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
let _ = std::fs::set_permissions(&self.setup_path, std::fs::Permissions::from_mode(0o600));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_server_setup(&self) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
if !self.setup_path.exists() {
|
||||
return Ok(None);
|
||||
}
|
||||
let bytes = fs::read(&self.setup_path).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
Ok(Some(bytes))
|
||||
}
|
||||
|
||||
fn store_user_record(&self, username: &str, record: Vec<u8>) -> Result<(), StorageError> {
|
||||
let mut map = lock(&self.users)?;
|
||||
match map.entry(username.to_string()) {
|
||||
std::collections::hash_map::Entry::Occupied(_) => {
|
||||
return Err(StorageError::DuplicateUser(username.to_string()))
|
||||
}
|
||||
std::collections::hash_map::Entry::Vacant(v) => {
|
||||
v.insert(record);
|
||||
}
|
||||
}
|
||||
self.flush_users(&self.users_path, &*map)
|
||||
}
|
||||
|
||||
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let map = lock(&self.users)?;
|
||||
Ok(map.get(username).cloned())
|
||||
}
|
||||
|
||||
fn has_user_record(&self, username: &str) -> Result<bool, StorageError> {
|
||||
let map = lock(&self.users)?;
|
||||
Ok(map.contains_key(username))
|
||||
}
|
||||
|
||||
fn store_user_identity_key(
|
||||
&self,
|
||||
username: &str,
|
||||
identity_key: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let mut map = lock(&self.identity_keys)?;
|
||||
map.insert(username.to_string(), identity_key);
|
||||
self.flush_map_string_bytes(&self.identity_keys_path, &*map)
|
||||
}
|
||||
|
||||
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let map = lock(&self.identity_keys)?;
|
||||
Ok(map.get(username).cloned())
|
||||
}
|
||||
|
||||
fn resolve_identity_key(&self, identity_key: &[u8]) -> Result<Option<String>, StorageError> {
|
||||
let map = lock(&self.identity_keys)?;
|
||||
for (username, ik) in map.iter() {
|
||||
if ik.as_slice() == identity_key {
|
||||
return Ok(Some(username.clone()));
|
||||
}
|
||||
}
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
fn peek(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
limit: usize,
|
||||
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
|
||||
let inner = lock(&self.deliveries)?;
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
};
|
||||
let messages: Vec<(u64, Vec<u8>)> = inner
|
||||
.map
|
||||
.get(&key)
|
||||
.map(|q| {
|
||||
let count = if limit == 0 { q.len() } else { limit.min(q.len()) };
|
||||
q.iter()
|
||||
.take(count)
|
||||
.map(|e| (e.seq, e.data.clone()))
|
||||
.collect()
|
||||
})
|
||||
.unwrap_or_default();
|
||||
// Non-destructive: do NOT flush.
|
||||
Ok(messages)
|
||||
}
|
||||
|
||||
fn ack(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
seq_up_to: u64,
|
||||
) -> Result<usize, StorageError> {
|
||||
let mut inner = lock(&self.deliveries)?;
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
};
|
||||
let removed = if let Some(q) = inner.map.get_mut(&key) {
|
||||
let before = q.len();
|
||||
q.retain(|e| e.seq > seq_up_to);
|
||||
before - q.len()
|
||||
} else {
|
||||
0
|
||||
};
|
||||
self.flush_delivery_map(&self.ds_path, &*inner)?;
|
||||
Ok(removed)
|
||||
}
|
||||
|
||||
fn publish_endpoint(
|
||||
&self,
|
||||
identity_key: &[u8],
|
||||
node_addr: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let mut map = lock(&self.endpoints)?;
|
||||
map.insert(identity_key.to_vec(), node_addr);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn resolve_endpoint(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
|
||||
let map = lock(&self.endpoints)?;
|
||||
Ok(map.get(identity_key).cloned())
|
||||
}
|
||||
|
||||
fn create_channel(&self, member_a: &[u8], member_b: &[u8]) -> Result<Vec<u8>, StorageError> {
|
||||
let (a, b) = if member_a < member_b {
|
||||
(member_a.to_vec(), member_b.to_vec())
|
||||
} else {
|
||||
(member_b.to_vec(), member_a.to_vec())
|
||||
};
|
||||
let mut map = lock(&self.channels)?;
|
||||
if let Some((channel_id, _)) = map.iter().find(|(_, (ma, mb))| ma == &a && mb == &b) {
|
||||
return Ok(channel_id.clone());
|
||||
}
|
||||
let mut channel_id = [0u8; 16];
|
||||
rand::thread_rng().fill_bytes(&mut channel_id);
|
||||
let channel_id = channel_id.to_vec();
|
||||
map.insert(channel_id.clone(), (a, b));
|
||||
self.flush_channels(&self.channels_path, &*map)?;
|
||||
Ok(channel_id)
|
||||
}
|
||||
|
||||
fn get_channel_members(&self, channel_id: &[u8]) -> Result<Option<(Vec<u8>, Vec<u8>)>, StorageError> {
|
||||
let map = lock(&self.channels)?;
|
||||
Ok(map.get(channel_id).cloned())
|
||||
}
|
||||
|
||||
fn store_identity_home_server(
|
||||
&self,
|
||||
_identity_key: &[u8],
|
||||
_home_server: &str,
|
||||
) -> Result<(), StorageError> {
|
||||
// File-backed store: federation mappings are ephemeral (in-memory only).
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_identity_home_server(
|
||||
&self,
|
||||
_identity_key: &[u8],
|
||||
) -> Result<Option<String>, StorageError> {
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
fn upsert_federation_peer(
|
||||
&self,
|
||||
_domain: &str,
|
||||
_is_active: bool,
|
||||
) -> Result<(), StorageError> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn list_federation_peers(&self) -> Result<Vec<(String, bool)>, StorageError> {
|
||||
Ok(vec![])
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    // Open a fresh store in a temporary directory; the TempDir handle is
    // returned so the directory outlives the store for the test's duration.
    fn temp_store() -> (TempDir, FileBackedStore) {
        let dir = TempDir::new().unwrap();
        let store = FileBackedStore::open(dir.path()).unwrap();
        (dir, store)
    }

    // KeyPackage queue is consume-once: a fetch removes the package.
    #[test]
    fn key_package_upload_fetch() {
        let (_dir, store) = temp_store();
        let ik = vec![1u8; 32];
        store.upload_key_package(&ik, vec![10, 20, 30]).unwrap();
        let pkg = store.fetch_key_package(&ik).unwrap();
        assert_eq!(pkg, Some(vec![10, 20, 30]));
        // Second fetch should return None (consumed)
        let pkg2 = store.fetch_key_package(&ik).unwrap();
        assert_eq!(pkg2, None);
    }

    // Sequence numbers start at 0, increment per enqueue, and fetch drains.
    #[test]
    fn enqueue_fetch_with_seq() {
        let (_dir, store) = temp_store();
        let rk = vec![2u8; 32];
        let ch = vec![];
        let seq0 = store.enqueue(&rk, &ch, vec![1]).unwrap();
        let seq1 = store.enqueue(&rk, &ch, vec![2]).unwrap();
        assert_eq!(seq0, 0);
        assert_eq!(seq1, 1);
        let msgs = store.fetch(&rk, &ch).unwrap();
        assert_eq!(msgs.len(), 2);
        assert_eq!(msgs[0], (0, vec![1]));
        assert_eq!(msgs[1], (1, vec![2]));
        // After fetch, queue should be empty
        let msgs2 = store.fetch(&rk, &ch).unwrap();
        assert!(msgs2.is_empty());
    }

    // fetch_limited takes the oldest `limit` messages and leaves the rest.
    #[test]
    fn fetch_limited_respects_limit() {
        let (_dir, store) = temp_store();
        let rk = vec![3u8; 32];
        let ch = vec![];
        for i in 0..5 {
            store.enqueue(&rk, &ch, vec![i]).unwrap();
        }
        let msgs = store.fetch_limited(&rk, &ch, 2).unwrap();
        assert_eq!(msgs.len(), 2);
        assert_eq!(msgs[0].1, vec![0]);
        assert_eq!(msgs[1].1, vec![1]);
        // Remaining 3 should still be there
        let depth = store.queue_depth(&rk, &ch).unwrap();
        assert_eq!(depth, 3);
    }

    // queue_depth tracks enqueues and resets to 0 after a draining fetch.
    #[test]
    fn queue_depth_tracking() {
        let (_dir, store) = temp_store();
        let rk = vec![4u8; 32];
        let ch = vec![];
        assert_eq!(store.queue_depth(&rk, &ch).unwrap(), 0);
        store.enqueue(&rk, &ch, vec![1]).unwrap();
        assert_eq!(store.queue_depth(&rk, &ch).unwrap(), 1);
        store.enqueue(&rk, &ch, vec![2]).unwrap();
        assert_eq!(store.queue_depth(&rk, &ch).unwrap(), 2);
        store.fetch(&rk, &ch).unwrap();
        assert_eq!(store.queue_depth(&rk, &ch).unwrap(), 0);
    }

    // Hybrid keys are read-many: fetching does not consume.
    #[test]
    fn hybrid_key_upload_fetch() {
        let (_dir, store) = temp_store();
        let ik = vec![5u8; 32];
        assert_eq!(store.fetch_hybrid_key(&ik).unwrap(), None);
        store.upload_hybrid_key(&ik, vec![99; 100]).unwrap();
        assert_eq!(store.fetch_hybrid_key(&ik).unwrap(), Some(vec![99; 100]));
    }

    #[test]
    fn user_record_crud() {
        let (_dir, store) = temp_store();
        assert!(!store.has_user_record("alice").unwrap());
        store.store_user_record("alice", vec![1, 2, 3]).unwrap();
        assert!(store.has_user_record("alice").unwrap());
        assert_eq!(store.get_user_record("alice").unwrap(), Some(vec![1, 2, 3]));
    }

    #[test]
    fn user_identity_key_crud() {
        let (_dir, store) = temp_store();
        assert_eq!(store.get_user_identity_key("bob").unwrap(), None);
        store.store_user_identity_key("bob", vec![7u8; 32]).unwrap();
        assert_eq!(store.get_user_identity_key("bob").unwrap(), Some(vec![7u8; 32]));
    }

    #[test]
    fn endpoint_publish_resolve() {
        let (_dir, store) = temp_store();
        let ik = vec![8u8; 32];
        assert_eq!(store.resolve_endpoint(&ik).unwrap(), None);
        store.publish_endpoint(&ik, vec![10, 20]).unwrap();
        assert_eq!(store.resolve_endpoint(&ik).unwrap(), Some(vec![10, 20]));
    }

    // Channel creation is idempotent and member-order-insensitive: (a, b) and
    // (b, a) resolve to the same 16-byte channel id with members sorted.
    #[test]
    fn create_channel_and_members() {
        let (_dir, store) = temp_store();
        let a = vec![1u8; 32];
        let b = vec![2u8; 32];
        assert_eq!(store.get_channel_members(&[0u8; 16]).unwrap(), None);
        let id1 = store.create_channel(&a, &b).unwrap();
        assert_eq!(id1.len(), 16);
        let members = store.get_channel_members(&id1).unwrap().unwrap();
        assert_eq!(members.0, a);
        assert_eq!(members.1, b);
        let id2 = store.create_channel(&b, &a).unwrap();
        assert_eq!(id1, id2);
    }
}
|
||||
Reference in New Issue
Block a user