Add channel-aware delivery and update roadmap

This commit is contained in:
2026-02-22 06:31:16 +01:00
parent 9a3fa94858
commit d1ddef4cea
11 changed files with 374 additions and 85 deletions

View File

@@ -1,6 +1,7 @@
use std::{
collections::{HashMap, VecDeque},
fs,
hash::{Hash, Hasher},
path::{Path, PathBuf},
sync::Mutex,
};
@@ -16,10 +17,28 @@ pub enum StorageError {
}
#[derive(Serialize, Deserialize, Default)]
struct QueueMap {
struct QueueMapV1 {
map: HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
}
/// Composite key for a delivery queue: one queue exists per
/// (channel, recipient) pair, so fetching in one channel cannot
/// drain messages addressed to the same recipient in another channel.
#[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Debug)]
pub struct ChannelKey {
/// Opaque identifier of the channel the message belongs to.
/// An empty vec marks entries migrated from the legacy v1 format.
pub channel_id: Vec<u8>,
/// Opaque recipient identifier (presumably the recipient's public
/// key bytes — confirm against the callers of `enqueue`).
pub recipient_key: Vec<u8>,
}
/// Hashes both fields in declaration order, keeping `Hash` consistent
/// with the derived `PartialEq`/`Eq` (equal keys hash equally). This
/// is byte-for-byte equivalent to what `#[derive(Hash)]` would emit.
impl Hash for ChannelKey {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Tuples hash element-wise in order, so this feeds the hasher
        // the exact same stream as hashing each field in sequence.
        (&self.channel_id, &self.recipient_key).hash(state);
    }
}
/// Current (v2) on-disk queue format, keyed per (channel, recipient)
/// pair via [`ChannelKey`]. This is the only format `flush_map` writes.
#[derive(Serialize, Deserialize, Default)]
struct QueueMapV2 {
map: HashMap<ChannelKey, VecDeque<Vec<u8>>>,
}
/// File-backed storage for KeyPackages and delivery queues.
///
/// Each mutation flushes the entire map to disk. Suitable for MVP-scale loads.
@@ -27,7 +46,7 @@ pub struct FileBackedStore {
kp_path: PathBuf,
ds_path: PathBuf,
key_packages: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
deliveries: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
deliveries: Mutex<HashMap<ChannelKey, VecDeque<Vec<u8>>>>,
}
impl FileBackedStore {
@@ -69,25 +88,42 @@ impl FileBackedStore {
Ok(package)
}
pub fn enqueue(&self, recipient_key: &[u8], payload: Vec<u8>) -> Result<(), StorageError> {
pub fn enqueue(
&self,
recipient_key: &[u8],
channel_id: &[u8],
payload: Vec<u8>,
) -> Result<(), StorageError> {
let mut map = self.deliveries.lock().unwrap();
map.entry(recipient_key.to_vec())
let key = ChannelKey {
channel_id: channel_id.to_vec(),
recipient_key: recipient_key.to_vec(),
};
map.entry(key)
.or_default()
.push_back(payload);
self.flush_map(&self.ds_path, &*map)
}
pub fn fetch(&self, recipient_key: &[u8]) -> Result<Vec<Vec<u8>>, StorageError> {
pub fn fetch(
&self,
recipient_key: &[u8],
channel_id: &[u8],
) -> Result<Vec<Vec<u8>>, StorageError> {
let mut map = self.deliveries.lock().unwrap();
let key = ChannelKey {
channel_id: channel_id.to_vec(),
recipient_key: recipient_key.to_vec(),
};
let messages = map
.get_mut(recipient_key)
.get_mut(&key)
.map(|q| q.drain(..).collect())
.unwrap_or_default();
self.flush_map(&self.ds_path, &*map)?;
Ok(messages)
}
fn load_map(path: &Path) -> Result<HashMap<Vec<u8>, VecDeque<Vec<u8>>>, StorageError> {
fn load_map(path: &Path) -> Result<HashMap<ChannelKey, VecDeque<Vec<u8>>>, StorageError> {
if !path.exists() {
return Ok(HashMap::new());
}
@@ -95,16 +131,30 @@ impl FileBackedStore {
if bytes.is_empty() {
return Ok(HashMap::new());
}
let map: QueueMap = bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)?;
Ok(map.map)
// Try v2 format (channel-aware). Fallback to legacy v1.
if let Ok(map) = bincode::deserialize::<QueueMapV2>(&bytes) {
return Ok(map.map);
}
let legacy: QueueMapV1 = bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)?;
let mut upgraded = HashMap::new();
for (recipient_key, queue) in legacy.map.into_iter() {
upgraded.insert(
ChannelKey {
channel_id: Vec::new(),
recipient_key,
},
queue,
);
}
Ok(upgraded)
}
fn flush_map(
&self,
path: &Path,
map: &HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
map: &HashMap<ChannelKey, VecDeque<Vec<u8>>>,
) -> Result<(), StorageError> {
let payload = QueueMap { map: map.clone() };
let payload = QueueMapV2 { map: map.clone() };
let bytes = bincode::serialize(&payload).map_err(|_| StorageError::Serde)?;
if let Some(parent) = path.parent() {
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;