chore: rename quicproquo → quicprochat in Rust workspace

Rename all crate directories, package names, binary names, proto
package/module paths, ALPN strings, env var prefixes, config filenames,
mDNS service names, and plugin ABI symbols from quicproquo/qpq to
quicprochat/qpc.
2026-03-07 18:24:52 +01:00
parent d8c1392587
commit a710037dde
212 changed files with 609 additions and 609 deletions


@@ -0,0 +1,28 @@
//! Account domain logic — account deletion with KT tombstone.
use std::sync::{Arc, Mutex};
use quicprochat_kt::MerkleLog;
use crate::storage::Store;
use super::types::DomainError;
/// Domain service for account lifecycle operations.
pub struct AccountService {
pub store: Arc<dyn Store>,
pub kt_log: Arc<Mutex<MerkleLog>>,
}
impl AccountService {
pub fn delete_account(&self, caller_identity_key: &[u8]) -> Result<(), DomainError> {
self.store.delete_account(caller_identity_key)?;
// Append a KT tombstone entry so the deletion is auditable.
if let Ok(mut log) = self.kt_log.lock() {
log.append("__tombstone__", caller_identity_key);
}
Ok(())
}
}


@@ -0,0 +1,72 @@
//! Authentication domain logic — OPAQUE registration and login.
//!
//! This module contains the pure business logic for OPAQUE auth,
//! extracted from `node_service/auth_ops.rs`. It operates on domain
//! types and the `Store` trait, with no dependency on Cap'n Proto or Protobuf.
use std::sync::Arc;
use dashmap::DashMap;
use opaque_ke::ServerSetup;
use quicprochat_core::opaque_auth::OpaqueSuite;
use crate::auth::{AuthConfig, PendingLogin, SessionInfo};
use crate::storage::{Store, StorageError};
use super::types::*;
/// Shared state needed by auth operations.
pub struct AuthService {
pub store: Arc<dyn Store>,
pub opaque_setup: Arc<ServerSetup<OpaqueSuite>>,
pub pending_logins: Arc<DashMap<String, PendingLogin>>,
pub sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
pub auth_cfg: Arc<AuthConfig>,
}
impl AuthService {
/// Validate a session token and return the caller's auth context.
pub fn validate_session(&self, token: &[u8]) -> Option<CallerAuth> {
let info = self.sessions.get(token)?;
if info.expires_at <= crate::auth::current_timestamp() {
// Release the DashMap read guard before mutating: calling `remove` while
// a `Ref` into the same shard is still held can deadlock.
drop(info);
self.sessions.remove(token);
return None;
}
Some(CallerAuth {
identity_key: info.identity_key.clone(),
token: token.to_vec(),
device_id: None,
})
}
/// Start OPAQUE registration.
pub fn register_start(&self, req: RegisterStartReq) -> Result<RegisterStartResp, StorageError> {
use opaque_ke::ServerRegistration;
let result = ServerRegistration::<OpaqueSuite>::start(
&self.opaque_setup,
opaque_ke::RegistrationRequest::deserialize(&req.request_bytes)
.map_err(|e| StorageError::Io(format!("bad registration request: {e}")))?,
req.username.as_bytes(),
)
.map_err(|e| StorageError::Io(format!("OPAQUE register start: {e}")))?;
let response_bytes = result.message.serialize().to_vec();
Ok(RegisterStartResp { response_bytes })
}
/// Finish OPAQUE registration — persist user record and identity key.
pub fn register_finish(&self, req: RegisterFinishReq) -> Result<RegisterFinishResp, StorageError> {
let upload = opaque_ke::RegistrationUpload::<OpaqueSuite>::deserialize(&req.upload_bytes)
.map_err(|e| StorageError::Io(format!("bad registration upload: {e}")))?;
let record = opaque_ke::ServerRegistration::<OpaqueSuite>::finish(upload);
let serialized = record.serialize().to_vec();
self.store.store_user_record(&req.username, serialized)?;
self.store
.store_user_identity_key(&req.username, req.identity_key)?;
Ok(RegisterFinishResp { success: true })
}
}
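
As a quick orientation for how the two registration calls compose, here is a minimal sketch of the round-trip as an RPC handler might drive it. The client-side OPAQUE blobs (`client_request_bytes`, `client_upload_bytes`) are hypothetical stand-ins for bytes arriving over the wire; only `AuthService`, `StorageError`, and the request/response types come from this diff.

// Sketch only: two-step OPAQUE registration. The byte blobs stand in for
// output of the client's opaque-ke state machine.
fn registration_roundtrip(
    auth: &AuthService,
    client_request_bytes: Vec<u8>,
    client_upload_bytes: Vec<u8>,
    identity_key: Vec<u8>, // client's 32-byte Ed25519 public key
) -> Result<(), StorageError> {
    let start = auth.register_start(RegisterStartReq {
        username: "alice".into(),
        request_bytes: client_request_bytes,
    })?;
    // `start.response_bytes` goes back to the client over the wire; the
    // client answers with its registration upload.
    let _ = start.response_bytes;
    let fin = auth.register_finish(RegisterFinishReq {
        username: "alice".into(),
        upload_bytes: client_upload_bytes,
        identity_key,
    })?;
    assert!(fin.success);
    Ok(())
}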


@@ -0,0 +1,193 @@
//! Blob domain logic — chunked file upload/download with SHA-256 verification.
use std::io::{Read, Seek, SeekFrom, Write};
use std::path::PathBuf;
use sha2::{Digest, Sha256};
use super::types::*;
/// Maximum blob size: 100 MB.
const MAX_BLOB_SIZE: u64 = 100 * 1024 * 1024;
/// Maximum download chunk size: 256 KB.
const MAX_DOWNLOAD_CHUNK: u32 = 256 * 1024;
/// Metadata stored alongside each completed blob.
#[derive(serde::Serialize, serde::Deserialize)]
struct BlobMeta {
mime_type: String,
total_size: u64,
uploaded_at: u64,
}
/// Domain service for blob (file attachment) storage.
pub struct BlobService {
pub data_dir: PathBuf,
}
impl BlobService {
fn blobs_dir(&self) -> PathBuf {
self.data_dir.join("blobs")
}
pub fn upload_blob(
&self,
req: UploadBlobReq,
_auth: &CallerAuth,
) -> Result<UploadBlobResp, DomainError> {
if req.blob_hash.len() != 32 {
return Err(DomainError::BlobHashLength(req.blob_hash.len()));
}
if req.total_size > MAX_BLOB_SIZE {
return Err(DomainError::BlobTooLarge(req.total_size));
}
if req.total_size == 0 {
return Err(DomainError::BadParams("total_size must be > 0".into()));
}
if req
.offset
.checked_add(req.chunk.len() as u64)
.is_none_or(|end| end > req.total_size)
{
return Err(DomainError::BadParams(format!(
"chunk out of bounds: offset={} + chunk_len={} > total_size={}",
req.offset,
req.chunk.len(),
req.total_size
)));
}
let blob_hex = hex::encode(&req.blob_hash);
let dir = self.blobs_dir();
std::fs::create_dir_all(&dir)
.map_err(|e| DomainError::Io(format!("create blobs directory: {e}")))?;
let part_path = dir.join(format!("{blob_hex}.part"));
let final_path = dir.join(&blob_hex);
let meta_path = dir.join(format!("{blob_hex}.meta"));
// Already fully uploaded.
if final_path.exists() {
return Ok(UploadBlobResp {
blob_id: req.blob_hash,
});
}
// Write chunk at offset.
let mut file = std::fs::OpenOptions::new()
.create(true)
.write(true)
.truncate(false)
.open(&part_path)
.map_err(|e| DomainError::Io(format!("open .part file: {e}")))?;
file.seek(SeekFrom::Start(req.offset))
.map_err(|e| DomainError::Io(format!("seek: {e}")))?;
file.write_all(&req.chunk)
.map_err(|e| DomainError::Io(format!("write chunk: {e}")))?;
file.sync_all()
.map_err(|e| DomainError::Io(format!("sync: {e}")))?;
// Check if upload is complete.
let end = req.offset + req.chunk.len() as u64;
if end == req.total_size {
// Verify SHA-256.
let mut vfile = std::fs::File::open(&part_path)
.map_err(|e| DomainError::Io(format!("open for verify: {e}")))?;
let mut hasher = Sha256::new();
let mut buf = [0u8; 64 * 1024];
loop {
let n = vfile
.read(&mut buf)
.map_err(|e| DomainError::Io(format!("read: {e}")))?;
if n == 0 {
break;
}
hasher.update(&buf[..n]);
}
let computed: [u8; 32] = hasher.finalize().into();
if computed[..] != req.blob_hash[..] {
let _ = std::fs::remove_file(&part_path);
return Err(DomainError::BlobHashMismatch);
}
// Finalize.
std::fs::rename(&part_path, &final_path)
.map_err(|e| DomainError::Io(format!("rename .part: {e}")))?;
// Write metadata.
let now = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_secs();
let meta = BlobMeta {
mime_type: req.mime_type,
total_size: req.total_size,
uploaded_at: now,
};
// Persist metadata with errors surfaced: downloads require the .meta file,
// so a silent write failure would leave the blob permanently unfetchable.
let json = serde_json::to_string_pretty(&meta)
.map_err(|e| DomainError::Io(format!("serialize blob metadata: {e}")))?;
std::fs::write(&meta_path, json.as_bytes())
.map_err(|e| DomainError::Io(format!("write blob metadata: {e}")))?;
}
Ok(UploadBlobResp {
blob_id: req.blob_hash,
})
}
pub fn download_blob(
&self,
req: DownloadBlobReq,
_auth: &CallerAuth,
) -> Result<DownloadBlobResp, DomainError> {
if req.blob_id.len() != 32 {
return Err(DomainError::BlobHashLength(req.blob_id.len()));
}
let blob_hex = hex::encode(&req.blob_id);
let dir = self.blobs_dir();
let blob_path = dir.join(&blob_hex);
let meta_path = dir.join(format!("{blob_hex}.meta"));
if !blob_path.exists() {
return Err(DomainError::BlobNotFound);
}
// Read metadata.
let meta_json = std::fs::read_to_string(&meta_path)
.map_err(|e| DomainError::Io(format!("read blob metadata: {e}")))?;
let meta: BlobMeta = serde_json::from_str(&meta_json)
.map_err(|e| DomainError::Io(format!("corrupt blob metadata: {e}")))?;
// Read chunk.
let mut file = std::fs::File::open(&blob_path)
.map_err(|e| DomainError::Io(format!("open blob: {e}")))?;
let file_len = file
.metadata()
.map_err(|e| DomainError::Io(format!("file metadata: {e}")))?
.len();
if req.offset >= file_len {
return Ok(DownloadBlobResp {
chunk: vec![],
total_size: meta.total_size,
mime_type: meta.mime_type,
});
}
file.seek(SeekFrom::Start(req.offset))
.map_err(|e| DomainError::Io(format!("seek: {e}")))?;
let remaining = (file_len - req.offset) as usize;
let to_read = remaining.min(req.length.min(MAX_DOWNLOAD_CHUNK) as usize);
let mut chunk = vec![0u8; to_read];
file.read_exact(&mut chunk)
.map_err(|e| DomainError::Io(format!("read chunk: {e}")))?;
Ok(DownloadBlobResp {
chunk,
total_size: meta.total_size,
mime_type: meta.mime_type,
})
}
}
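
For context on the chunking contract above, a minimal caller-side sketch. The 256 KiB client chunk size is an assumption (the service only bounds download chunks), and the `svc`/`auth` values are constructed elsewhere; `Sha256` is the same import this file already uses.

// Sketch only: drive upload_blob chunk by chunk. The service verifies the
// SHA-256 and finalizes once the last byte lands (offset + len == total_size).
fn upload_whole_blob(
    svc: &BlobService,
    auth: &CallerAuth,
    data: &[u8],
    mime: &str,
) -> Result<Vec<u8>, DomainError> {
    const CHUNK: usize = 256 * 1024; // assumed client-side chunk size
    let blob_hash: Vec<u8> = Sha256::digest(data).to_vec();
    for (i, chunk) in data.chunks(CHUNK).enumerate() {
        svc.upload_blob(
            UploadBlobReq {
                blob_hash: blob_hash.clone(),
                chunk: chunk.to_vec(),
                offset: (i * CHUNK) as u64,
                total_size: data.len() as u64,
                mime_type: mime.to_string(),
            },
            auth,
        )?;
    }
    Ok(blob_hash) // doubles as the blob_id for download_blob
}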


@@ -0,0 +1,38 @@
//! Channel domain logic — 1:1 DM channel creation.
use std::sync::Arc;
use crate::storage::Store;
use super::types::*;
/// Domain service for 1:1 channel management.
pub struct ChannelService {
pub store: Arc<dyn Store>,
}
impl ChannelService {
pub fn create_channel(
&self,
req: CreateChannelReq,
caller_identity_key: &[u8],
) -> Result<CreateChannelResp, DomainError> {
if req.peer_key.len() != 32 {
return Err(DomainError::InvalidIdentityKey(req.peer_key.len()));
}
if caller_identity_key == req.peer_key.as_slice() {
return Err(DomainError::BadParams(
"peer_key must not equal caller identity".into(),
));
}
let (channel_id, was_new) = self
.store
.create_channel(caller_identity_key, &req.peer_key)?;
Ok(CreateChannelResp {
channel_id,
was_new,
})
}
}


@@ -0,0 +1,352 @@
//! Delivery domain logic — enqueue, fetch, peek, ack.
//!
//! Pure business logic operating on `Store` trait and domain types.
//!
//! ## Multi-device delivery
//!
//! When a message is enqueued for a recipient identity, the service resolves
//! all registered device IDs for that identity and enqueues a copy of the
//! payload to each device-scoped queue. The queue key is a composite of
//! `identity_key + device_id`, so each device maintains its own sequence
//! counter and ack state.
//!
//! If the recipient has no registered devices, the message is delivered to
//! the bare `identity_key` queue (backwards compatible with single-device
//! clients).
use std::sync::Arc;
use dashmap::DashMap;
use tokio::sync::Notify;
use crate::storage::Store;
use super::types::*;
/// Build a device-scoped recipient key: `identity_key || device_id`.
/// When `device_id` is empty, returns a clone of `identity_key` (single-device compat).
fn device_recipient_key(identity_key: &[u8], device_id: &[u8]) -> Vec<u8> {
if device_id.is_empty() {
return identity_key.to_vec();
}
let mut key = Vec::with_capacity(identity_key.len() + device_id.len());
key.extend_from_slice(identity_key);
key.extend_from_slice(device_id);
key
}
/// Shared state needed by delivery operations.
pub struct DeliveryService {
pub store: Arc<dyn Store>,
pub waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
}
impl DeliveryService {
/// Resolve the device-scoped recipient keys for an identity.
/// Returns a list of composite keys (identity_key + device_id) for each
/// registered device. If no devices are registered, returns a single-element
/// list with the bare identity_key for backwards compatibility.
fn resolve_device_keys(&self, identity_key: &[u8]) -> Vec<Vec<u8>> {
let devices = self.store.list_devices(identity_key).unwrap_or_default();
if devices.is_empty() {
vec![identity_key.to_vec()]
} else {
devices
.into_iter()
.map(|(device_id, _, _)| device_recipient_key(identity_key, &device_id))
.collect()
}
}
/// Wake any long-polling waiter for the given recipient key.
fn wake_waiter(&self, recipient_key: &[u8]) {
if let Some(notify) = self.waiters.get(recipient_key) {
notify.notify_one();
}
}
/// Enqueue a payload for delivery to all devices of the recipient.
///
/// Returns the sequence number from the *first* device queue (for backwards
/// compatibility with single-device callers).
pub fn enqueue(&self, req: EnqueueReq) -> Result<EnqueueResp, crate::storage::StorageError> {
let ttl = if req.ttl_secs > 0 {
Some(req.ttl_secs)
} else {
None
};
let device_keys = self.resolve_device_keys(&req.recipient_key);
let mut first_seq = 0;
for (i, dk) in device_keys.iter().enumerate() {
let seq = self.store.enqueue(
dk,
&req.channel_id,
req.payload.clone(),
ttl,
)?;
if i == 0 {
first_seq = seq;
}
self.wake_waiter(dk);
}
// Also wake the bare identity_key waiter (legacy clients).
self.wake_waiter(&req.recipient_key);
Ok(EnqueueResp {
seq: first_seq,
delivery_proof: Vec::new(), // Proof generated at RPC handler layer (see v2_handlers/delivery.rs)
})
}
/// Fetch and drain queued messages for a specific device.
///
/// The `recipient_key` should be the device-scoped composite key
/// (`identity_key + device_id`) or bare `identity_key` for single-device.
pub fn fetch(&self, req: FetchReq) -> Result<FetchResp, crate::storage::StorageError> {
let messages = if req.limit > 0 {
self.store
.fetch_limited(&req.recipient_key, &req.channel_id, req.limit as usize)?
} else {
self.store.fetch(&req.recipient_key, &req.channel_id)?
};
Ok(FetchResp {
payloads: messages
.into_iter()
.map(|(seq, data)| Envelope { seq, data })
.collect(),
})
}
/// Peek at messages without removing them.
pub fn peek(&self, req: PeekReq) -> Result<PeekResp, crate::storage::StorageError> {
let messages = self.store.peek(
&req.recipient_key,
&req.channel_id,
if req.limit > 0 { req.limit as usize } else { 0 },
)?;
Ok(PeekResp {
payloads: messages
.into_iter()
.map(|(seq, data)| Envelope { seq, data })
.collect(),
})
}
/// Acknowledge messages up to a sequence number.
pub fn ack(&self, req: AckReq) -> Result<(), crate::storage::StorageError> {
self.store
.ack(&req.recipient_key, &req.channel_id, req.seq_up_to)?;
Ok(())
}
/// Batch enqueue to multiple recipients (with multi-device fan-out for each).
///
/// Returns one sequence number per recipient identity (from the first device queue).
pub fn batch_enqueue(
&self,
req: BatchEnqueueReq,
) -> Result<BatchEnqueueResp, crate::storage::StorageError> {
let ttl = if req.ttl_secs > 0 {
Some(req.ttl_secs)
} else {
None
};
let mut seqs = Vec::with_capacity(req.recipient_keys.len());
for rk in &req.recipient_keys {
let device_keys = self.resolve_device_keys(rk);
let mut first_seq = 0;
for (i, dk) in device_keys.iter().enumerate() {
let seq = self.store.enqueue(dk, &req.channel_id, req.payload.clone(), ttl)?;
if i == 0 {
first_seq = seq;
}
self.wake_waiter(dk);
}
self.wake_waiter(rk);
seqs.push(first_seq);
}
Ok(BatchEnqueueResp { seqs })
}
/// Build a device-scoped recipient key from identity_key and device_id.
/// Public helper for RPC handlers to build the correct fetch/ack key.
pub fn device_recipient_key(identity_key: &[u8], device_id: &[u8]) -> Vec<u8> {
device_recipient_key(identity_key, device_id)
}
}
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
use super::*;
use crate::storage::FileBackedStore;
fn test_service() -> (tempfile::TempDir, DeliveryService) {
let dir = tempfile::tempdir().unwrap();
let store = Arc::new(FileBackedStore::open(dir.path()).unwrap());
let svc = DeliveryService {
store,
waiters: Arc::new(DashMap::new()),
};
(dir, svc)
}
#[test]
fn enqueue_single_device_backwards_compat() {
let (_dir, svc) = test_service();
let ik = vec![1u8; 32];
let ch = vec![0u8; 16];
// No devices registered — should enqueue to bare identity_key.
let resp = svc
.enqueue(EnqueueReq {
recipient_key: ik.clone(),
payload: b"hello".to_vec(),
channel_id: ch.clone(),
ttl_secs: 0,
})
.unwrap();
assert_eq!(resp.seq, 0);
// Fetch from bare identity_key.
let fetched = svc
.fetch(FetchReq {
recipient_key: ik,
channel_id: ch,
limit: 10,
})
.unwrap();
assert_eq!(fetched.payloads.len(), 1);
assert_eq!(fetched.payloads[0].data, b"hello");
}
#[test]
fn enqueue_multi_device_fanout() {
let (_dir, svc) = test_service();
let ik = vec![2u8; 32];
let ch = vec![0u8; 16];
let dev_a = b"device-a".to_vec();
let dev_b = b"device-b".to_vec();
// Register two devices.
svc.store
.register_device(&ik, &dev_a, "Phone")
.unwrap();
svc.store
.register_device(&ik, &dev_b, "Laptop")
.unwrap();
// Enqueue a message.
svc.enqueue(EnqueueReq {
recipient_key: ik.clone(),
payload: b"fanout-msg".to_vec(),
channel_id: ch.clone(),
ttl_secs: 0,
})
.unwrap();
// Each device should receive the message on its own queue.
let key_a = device_recipient_key(&ik, &dev_a);
let key_b = device_recipient_key(&ik, &dev_b);
let msgs_a = svc
.fetch(FetchReq {
recipient_key: key_a,
channel_id: ch.clone(),
limit: 10,
})
.unwrap();
assert_eq!(msgs_a.payloads.len(), 1);
assert_eq!(msgs_a.payloads[0].data, b"fanout-msg");
let msgs_b = svc
.fetch(FetchReq {
recipient_key: key_b,
channel_id: ch.clone(),
limit: 10,
})
.unwrap();
assert_eq!(msgs_b.payloads.len(), 1);
assert_eq!(msgs_b.payloads[0].data, b"fanout-msg");
// Bare identity_key queue should be empty (not used when devices exist).
let msgs_bare = svc
.fetch(FetchReq {
recipient_key: ik,
channel_id: ch,
limit: 10,
})
.unwrap();
assert!(msgs_bare.payloads.is_empty());
}
#[test]
fn batch_enqueue_multi_device() {
let (_dir, svc) = test_service();
let ik1 = vec![3u8; 32];
let ik2 = vec![4u8; 32];
let ch = vec![0u8; 16];
let dev = b"dev1".to_vec();
// ik1 has a device, ik2 has none.
svc.store
.register_device(&ik1, &dev, "Phone")
.unwrap();
let resp = svc
.batch_enqueue(BatchEnqueueReq {
recipient_keys: vec![ik1.clone(), ik2.clone()],
payload: b"batch-msg".to_vec(),
channel_id: ch.clone(),
ttl_secs: 0,
})
.unwrap();
assert_eq!(resp.seqs.len(), 2);
// ik1 device should have the message.
let key_1 = device_recipient_key(&ik1, &dev);
let msgs_1 = svc
.fetch(FetchReq {
recipient_key: key_1,
channel_id: ch.clone(),
limit: 10,
})
.unwrap();
assert_eq!(msgs_1.payloads.len(), 1);
// ik2 (no devices) should have it on bare key.
let msgs_2 = svc
.fetch(FetchReq {
recipient_key: ik2,
channel_id: ch,
limit: 10,
})
.unwrap();
assert_eq!(msgs_2.payloads.len(), 1);
}
#[test]
fn device_recipient_key_construction() {
let ik = vec![1u8; 32];
let dev = b"my-device".to_vec();
// With device_id.
let key = device_recipient_key(&ik, &dev);
assert_eq!(key.len(), 32 + dev.len());
assert_eq!(&key[..32], &ik[..]);
assert_eq!(&key[32..], dev.as_slice());
// Empty device_id returns bare identity_key.
let bare = device_recipient_key(&ik, &[]);
assert_eq!(bare, ik);
}
}
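
One piece this file leaves to the RPC layer is the long-poll loop that the `waiters` map exists for. A hedged sketch of that loop follows; the function shape and timeout policy are assumptions, while `waiters` and `fetch` are the real fields and methods from this file.

// Sketch only: long-poll fetch built on DeliveryService.
async fn fetch_wait(
    svc: &DeliveryService,
    req: FetchReq,
    timeout: std::time::Duration,
) -> Result<FetchResp, crate::storage::StorageError> {
    // Install the Notify in the map before fetching: `notify_one` stores a
    // permit, so an enqueue racing with an empty fetch is not lost.
    let notify = svc
        .waiters
        .entry(req.recipient_key.clone())
        .or_insert_with(|| std::sync::Arc::new(tokio::sync::Notify::new()))
        .clone();
    loop {
        let resp = svc.fetch(FetchReq {
            recipient_key: req.recipient_key.clone(),
            channel_id: req.channel_id.clone(),
            limit: req.limit,
        })?;
        if !resp.payloads.is_empty() {
            return Ok(resp);
        }
        if tokio::time::timeout(timeout, notify.notified()).await.is_err() {
            return Ok(resp); // timed out; hand back the empty response
        }
    }
}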


@@ -0,0 +1,76 @@
//! Device registry domain logic — register, list, revoke devices.
use std::sync::Arc;
use crate::storage::Store;
use super::types::*;
const MAX_DEVICES_PER_IDENTITY: usize = 5;
/// Domain service for multi-device management.
pub struct DeviceService {
pub store: Arc<dyn Store>,
}
impl DeviceService {
pub fn register_device(
&self,
req: RegisterDeviceReq,
caller_identity_key: &[u8],
) -> Result<RegisterDeviceResp, DomainError> {
if req.device_id.is_empty() {
return Err(DomainError::BadParams(
"device_id must not be empty".into(),
));
}
let count = self.store.device_count(caller_identity_key)?;
if count >= MAX_DEVICES_PER_IDENTITY {
return Err(DomainError::DeviceLimit(MAX_DEVICES_PER_IDENTITY));
}
let success =
self.store
.register_device(caller_identity_key, &req.device_id, &req.device_name)?;
Ok(RegisterDeviceResp { success })
}
pub fn list_devices(
&self,
caller_identity_key: &[u8],
) -> Result<ListDevicesResp, DomainError> {
let raw = self.store.list_devices(caller_identity_key)?;
let devices = raw
.into_iter()
.map(|(device_id, device_name, registered_at)| DeviceInfo {
device_id,
device_name,
registered_at,
})
.collect();
Ok(ListDevicesResp { devices })
}
pub fn revoke_device(
&self,
req: RevokeDeviceReq,
caller_identity_key: &[u8],
) -> Result<RevokeDeviceResp, DomainError> {
if req.device_id.is_empty() {
return Err(DomainError::BadParams(
"device_id must not be empty".into(),
));
}
let success = self
.store
.revoke_device(caller_identity_key, &req.device_id)?;
if !success {
return Err(DomainError::DeviceNotFound);
}
Ok(RevokeDeviceResp { success })
}
}
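
The intended lifecycle is register, list, revoke; a minimal sketch with an assumed device id and name (registration is capped at MAX_DEVICES_PER_IDENTITY):

// Sketch only: device lifecycle against DeviceService.
fn device_lifecycle(svc: &DeviceService, caller_identity_key: &[u8]) -> Result<(), DomainError> {
    svc.register_device(
        RegisterDeviceReq {
            device_id: b"laptop-01".to_vec(),
            device_name: "Laptop".into(),
        },
        caller_identity_key,
    )?;
    let listed = svc.list_devices(caller_identity_key)?;
    assert!(listed.devices.iter().any(|d| d.device_id == b"laptop-01"));
    svc.revoke_device(
        RevokeDeviceReq {
            device_id: b"laptop-01".to_vec(),
        },
        caller_identity_key,
    )?;
    Ok(())
}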


@@ -0,0 +1,208 @@
//! Group management domain logic — metadata, membership tracking.
use std::sync::Arc;
use crate::storage::Store;
use super::types::*;
/// Domain service for group metadata and membership.
pub struct GroupService {
pub store: Arc<dyn Store>,
}
impl GroupService {
/// Update group metadata (name, description, avatar_hash).
pub fn update_metadata(
&self,
req: UpdateGroupMetadataReq,
caller_identity_key: &[u8],
) -> Result<(), DomainError> {
if req.group_id.is_empty() {
return Err(DomainError::BadParams("group_id must not be empty".into()));
}
self.store.store_group_metadata(
&req.group_id,
&req.name,
&req.description,
&req.avatar_hash,
caller_identity_key,
)?;
Ok(())
}
/// List group members with resolved usernames.
pub fn list_members(
&self,
req: ListGroupMembersReq,
) -> Result<ListGroupMembersResp, DomainError> {
if req.group_id.is_empty() {
return Err(DomainError::BadParams("group_id must not be empty".into()));
}
let raw = self.store.list_group_members(&req.group_id)?;
let members = raw
.into_iter()
.map(|(identity_key, joined_at)| {
let username = self
.store
.resolve_identity_key(&identity_key)
.ok()
.flatten()
.unwrap_or_default();
GroupMemberInfo {
identity_key,
username,
joined_at,
}
})
.collect();
Ok(ListGroupMembersResp { members })
}
/// Track a member addition in the server-side membership table.
pub fn add_member(
&self,
group_id: &[u8],
identity_key: &[u8],
) -> Result<(), DomainError> {
self.store.add_group_member(group_id, identity_key)?;
Ok(())
}
/// Track a member removal in the server-side membership table.
pub fn remove_member(
&self,
group_id: &[u8],
identity_key: &[u8],
) -> Result<bool, DomainError> {
let removed = self.store.remove_group_member(group_id, identity_key)?;
Ok(removed)
}
/// Get group metadata.
pub fn get_metadata(
&self,
group_id: &[u8],
) -> Result<Option<GroupMetadata>, DomainError> {
if group_id.is_empty() {
return Err(DomainError::BadParams("group_id must not be empty".into()));
}
match self.store.get_group_metadata(group_id)? {
Some((name, description, avatar_hash, creator_key, created_at)) => {
Ok(Some(GroupMetadata {
group_id: group_id.to_vec(),
name,
description,
avatar_hash,
creator_key,
created_at,
}))
}
None => Ok(None),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::storage::FileBackedStore;
fn make_service() -> (GroupService, tempfile::TempDir) {
let dir = tempfile::tempdir().expect("tempdir");
let store = FileBackedStore::open(dir.path()).expect("open store");
let svc = GroupService {
store: Arc::new(store),
};
(svc, dir)
}
#[test]
fn update_and_get_metadata() {
let (svc, _dir) = make_service();
let group_id = b"test-group-00001".to_vec();
let caller = b"caller-key".to_vec();
svc.update_metadata(
UpdateGroupMetadataReq {
group_id: group_id.clone(),
name: "Test Group".into(),
description: "A test group".into(),
avatar_hash: vec![0xAB],
},
&caller,
)
.expect("update_metadata should succeed");
let meta = svc.get_metadata(&group_id).expect("get_metadata should succeed");
let meta = meta.expect("metadata should exist");
assert_eq!(meta.name, "Test Group");
assert_eq!(meta.description, "A test group");
assert_eq!(meta.avatar_hash, vec![0xAB]);
assert_eq!(meta.creator_key, caller);
}
#[test]
fn get_metadata_nonexistent_returns_none() {
let (svc, _dir) = make_service();
let result = svc.get_metadata(b"no-such-group").expect("should not error");
assert!(result.is_none());
}
#[test]
fn empty_group_id_rejected() {
let (svc, _dir) = make_service();
let err = svc.update_metadata(
UpdateGroupMetadataReq {
group_id: Vec::new(),
name: "X".into(),
description: String::new(),
avatar_hash: Vec::new(),
},
b"caller",
);
assert!(err.is_err());
}
#[test]
fn add_list_remove_members() {
let (svc, _dir) = make_service();
let group_id = b"membership-group".to_vec();
let member_a = b"member-a-key".to_vec();
let member_b = b"member-b-key".to_vec();
svc.add_member(&group_id, &member_a).expect("add a");
svc.add_member(&group_id, &member_b).expect("add b");
let resp = svc
.list_members(ListGroupMembersReq {
group_id: group_id.clone(),
})
.expect("list members");
assert_eq!(resp.members.len(), 2);
let removed = svc.remove_member(&group_id, &member_a).expect("remove a");
assert!(removed);
let resp = svc
.list_members(ListGroupMembersReq {
group_id: group_id.clone(),
})
.expect("list members after removal");
assert_eq!(resp.members.len(), 1);
assert_eq!(resp.members[0].identity_key, member_b);
}
#[test]
fn remove_nonexistent_member_returns_false() {
let (svc, _dir) = make_service();
let removed = svc
.remove_member(b"group", b"nobody")
.expect("remove nonexistent");
assert!(!removed);
}
}


@@ -0,0 +1,93 @@
//! Key management domain logic — KeyPackage and hybrid key operations.
use std::sync::Arc;
use sha2::{Digest, Sha256};
use crate::storage::Store;
use super::types::*;
const MAX_KEYPACKAGE_BYTES: usize = 1024 * 1024; // 1 MB
/// Domain service for MLS KeyPackage and hybrid (PQ) key management.
pub struct KeyService {
pub store: Arc<dyn Store>,
}
impl KeyService {
pub fn upload_key_package(
&self,
req: UploadKeyPackageReq,
_auth: &CallerAuth,
) -> Result<UploadKeyPackageResp, DomainError> {
if req.identity_key.len() != 32 {
return Err(DomainError::InvalidIdentityKey(req.identity_key.len()));
}
if req.package.is_empty() {
return Err(DomainError::EmptyPackage);
}
if req.package.len() > MAX_KEYPACKAGE_BYTES {
return Err(DomainError::PackageTooLarge(req.package.len()));
}
let fingerprint: Vec<u8> = Sha256::digest(&req.package).to_vec();
self.store
.upload_key_package(&req.identity_key, req.package)?;
Ok(UploadKeyPackageResp { fingerprint })
}
pub fn fetch_key_package(
&self,
req: FetchKeyPackageReq,
_auth: &CallerAuth,
) -> Result<FetchKeyPackageResp, DomainError> {
let package = self.store.fetch_key_package(&req.identity_key)?;
Ok(FetchKeyPackageResp {
package: package.unwrap_or_default(),
})
}
pub fn upload_hybrid_key(
&self,
req: UploadHybridKeyReq,
_auth: &CallerAuth,
) -> Result<(), DomainError> {
if req.identity_key.len() != 32 {
return Err(DomainError::InvalidIdentityKey(req.identity_key.len()));
}
if req.hybrid_public_key.is_empty() {
return Err(DomainError::EmptyHybridKey);
}
self.store
.upload_hybrid_key(&req.identity_key, req.hybrid_public_key)?;
Ok(())
}
pub fn fetch_hybrid_key(
&self,
req: FetchHybridKeyReq,
_auth: &CallerAuth,
) -> Result<FetchHybridKeyResp, DomainError> {
let hybrid_public_key = self
.store
.fetch_hybrid_key(&req.identity_key)?
.unwrap_or_default();
Ok(FetchHybridKeyResp { hybrid_public_key })
}
pub fn fetch_hybrid_keys(
&self,
req: FetchHybridKeysReq,
_auth: &CallerAuth,
) -> Result<FetchHybridKeysResp, DomainError> {
let mut keys = Vec::with_capacity(req.identity_keys.len());
for ik in &req.identity_keys {
let pk = self.store.fetch_hybrid_key(ik)?.unwrap_or_default();
keys.push(pk);
}
Ok(FetchHybridKeysResp { keys })
}
}
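
Since the service derives the fingerprint as a plain SHA-256 of the package bytes, a client can cross-check the echoed value; a minimal sketch, reusing this file's `Sha256` import:

// Sketch only: upload a KeyPackage and verify the returned fingerprint.
fn upload_and_verify(
    svc: &KeyService,
    auth: &CallerAuth,
    identity_key: Vec<u8>,
    package: Vec<u8>,
) -> Result<(), DomainError> {
    let local_fp: Vec<u8> = Sha256::digest(&package).to_vec();
    let resp = svc.upload_key_package(UploadKeyPackageReq { identity_key, package }, auth)?;
    assert_eq!(resp.fingerprint, local_fp, "server echoes the SHA-256 fingerprint");
    Ok(())
}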


@@ -0,0 +1,24 @@
//! Domain types and service logic — protocol-agnostic.
//!
//! These types define the server's business logic independently of any
//! serialization format (Cap'n Proto, Protobuf). RPC handlers translate
//! wire-format messages into these types, call service functions, and
//! translate the results back.
pub mod types;
pub mod auth;
pub mod delivery;
pub mod keys;
pub mod channels;
pub mod users;
pub mod blobs;
pub mod devices;
pub mod groups;
pub mod p2p;
pub mod account;
pub mod moderation;
pub mod notification;
pub mod rate_limit;
pub mod recovery;
#[cfg(feature = "traffic-resistance")]
pub mod traffic_resistance;
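
The handler pattern this module doc describes might look like the following hedged sketch. `WireCreateChannel`, `WireResponse`, and the `respond_*` helpers are hypothetical stand-ins for the Cap'n Proto/Protobuf plumbing; the domain types and service are the ones defined here.

// Sketch only: decode wire -> domain request, call the service, re-encode.
// Everything prefixed Wire*/respond_* is hypothetical.
fn handle_create_channel(
    svc: &channels::ChannelService,
    caller: &types::CallerAuth,
    wire: WireCreateChannel,
) -> WireResponse {
    let req = types::CreateChannelReq { peer_key: wire.peer_key };
    match svc.create_channel(req, &caller.identity_key) {
        Ok(resp) => respond_ok(resp.channel_id, resp.was_new),
        Err(e) => respond_error(e.to_string()),
    }
}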


@@ -0,0 +1,304 @@
//! Moderation domain logic — report, ban, unban, list.
//!
//! Pure business logic operating on `Store` trait and domain types.
use std::sync::Arc;
use crate::storage::Store;
use super::types::*;
/// Shared state needed by moderation operations.
pub struct ModerationService {
pub store: Arc<dyn Store>,
}
impl ModerationService {
/// Submit an encrypted report for a message.
pub fn report_message(
&self,
req: ReportMessageReq,
) -> Result<ReportMessageResp, DomainError> {
if req.encrypted_report.is_empty() {
return Err(DomainError::BadParams(
"encrypted report must not be empty".into(),
));
}
self.store
.store_report(
&req.encrypted_report,
&req.conversation_id,
&req.reporter_identity,
)
.map_err(DomainError::Storage)?;
tracing::info!(
reporter_prefix = %hex_prefix(&req.reporter_identity),
"audit: message reported"
);
Ok(ReportMessageResp { accepted: true })
}
/// Ban a user by identity key.
pub fn ban_user(&self, req: BanUserReq) -> Result<BanUserResp, DomainError> {
if req.identity_key.len() != 32 {
return Err(DomainError::InvalidIdentityKey(req.identity_key.len()));
}
let expires_at = if req.duration_secs == 0 {
0 // permanent
} else {
now_secs() + req.duration_secs
};
self.store
.ban_user(&req.identity_key, &req.reason, expires_at)
.map_err(DomainError::Storage)?;
tracing::info!(
identity_prefix = %hex_prefix(&req.identity_key),
reason = %req.reason,
expires_at,
"audit: user banned"
);
Ok(BanUserResp { success: true })
}
/// Unban a user by identity key.
pub fn unban_user(&self, req: UnbanUserReq) -> Result<UnbanUserResp, DomainError> {
if req.identity_key.len() != 32 {
return Err(DomainError::InvalidIdentityKey(req.identity_key.len()));
}
let removed = self
.store
.unban_user(&req.identity_key)
.map_err(DomainError::Storage)?;
if removed {
tracing::info!(
identity_prefix = %hex_prefix(&req.identity_key),
"audit: user unbanned"
);
}
Ok(UnbanUserResp { success: removed })
}
/// Check if a user is currently banned.
pub fn check_ban(&self, identity_key: &[u8]) -> Result<Option<String>, DomainError> {
self.store
.is_banned(identity_key)
.map_err(DomainError::Storage)
}
/// List reports with pagination.
pub fn list_reports(&self, req: ListReportsReq) -> Result<ListReportsResp, DomainError> {
let raw = self
.store
.list_reports(req.limit, req.offset)
.map_err(DomainError::Storage)?;
let reports = raw
.into_iter()
.map(
|(id, encrypted_report, conversation_id, reporter_identity, timestamp)| {
ReportEntry {
id,
encrypted_report,
conversation_id,
reporter_identity,
timestamp,
}
},
)
.collect();
Ok(ListReportsResp { reports })
}
/// List all currently banned users.
pub fn list_banned(&self) -> Result<ListBannedResp, DomainError> {
let raw = self.store.list_banned().map_err(DomainError::Storage)?;
let users = raw
.into_iter()
.map(
|(identity_key, reason, banned_at, expires_at)| BannedUserEntry {
identity_key,
reason,
banned_at,
expires_at,
},
)
.collect();
Ok(ListBannedResp { users })
}
}
fn now_secs() -> u64 {
std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_secs()
}
fn hex_prefix(bytes: &[u8]) -> String {
let len = bytes.len().min(4);
let hex: String = bytes[..len].iter().map(|b| format!("{b:02x}")).collect();
format!("{hex}...")
}
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
use super::*;
use crate::storage::FileBackedStore;
fn test_service() -> (tempfile::TempDir, ModerationService) {
let dir = tempfile::tempdir().unwrap();
let store = Arc::new(FileBackedStore::open(dir.path()).unwrap());
let svc = ModerationService { store };
(dir, svc)
}
#[test]
fn report_store_and_list() {
let (_dir, svc) = test_service();
let resp = svc
.report_message(ReportMessageReq {
encrypted_report: vec![1, 2, 3],
conversation_id: vec![10; 16],
reporter_identity: vec![20; 32],
})
.unwrap();
assert!(resp.accepted);
let reports = svc
.list_reports(ListReportsReq {
limit: 10,
offset: 0,
})
.unwrap();
assert_eq!(reports.reports.len(), 1);
assert_eq!(reports.reports[0].encrypted_report, vec![1, 2, 3]);
assert_eq!(reports.reports[0].conversation_id, vec![10; 16]);
assert_eq!(reports.reports[0].reporter_identity, vec![20; 32]);
}
#[test]
fn report_empty_rejected() {
let (_dir, svc) = test_service();
let result = svc.report_message(ReportMessageReq {
encrypted_report: vec![],
conversation_id: vec![10; 16],
reporter_identity: vec![20; 32],
});
assert!(result.is_err());
}
#[test]
fn ban_unban_lifecycle() {
let (_dir, svc) = test_service();
let ik = vec![1u8; 32];
// Not banned initially.
assert!(svc.check_ban(&ik).unwrap().is_none());
// Ban permanently.
let resp = svc
.ban_user(BanUserReq {
identity_key: ik.clone(),
reason: "spam".into(),
duration_secs: 0,
})
.unwrap();
assert!(resp.success);
// Now banned.
let reason = svc.check_ban(&ik).unwrap();
assert_eq!(reason, Some("spam".to_string()));
// Listed in banned users.
let banned = svc.list_banned().unwrap();
assert_eq!(banned.users.len(), 1);
assert_eq!(banned.users[0].identity_key, ik);
assert_eq!(banned.users[0].reason, "spam");
assert_eq!(banned.users[0].expires_at, 0); // permanent
// Unban.
let resp = svc.unban_user(UnbanUserReq { identity_key: ik.clone() }).unwrap();
assert!(resp.success);
// No longer banned.
assert!(svc.check_ban(&ik).unwrap().is_none());
assert!(svc.list_banned().unwrap().users.is_empty());
}
#[test]
fn ban_invalid_identity_key() {
let (_dir, svc) = test_service();
let result = svc.ban_user(BanUserReq {
identity_key: vec![1u8; 16], // wrong length
reason: "test".into(),
duration_secs: 0,
});
assert!(result.is_err());
}
#[test]
fn list_reports_pagination() {
let (_dir, svc) = test_service();
for i in 0..5u8 {
svc.report_message(ReportMessageReq {
encrypted_report: vec![i],
conversation_id: vec![10; 16],
reporter_identity: vec![20; 32],
})
.unwrap();
}
let page1 = svc
.list_reports(ListReportsReq {
limit: 2,
offset: 0,
})
.unwrap();
assert_eq!(page1.reports.len(), 2);
assert_eq!(page1.reports[0].encrypted_report, vec![0]);
let page2 = svc
.list_reports(ListReportsReq {
limit: 2,
offset: 2,
})
.unwrap();
assert_eq!(page2.reports.len(), 2);
assert_eq!(page2.reports[0].encrypted_report, vec![2]);
let page3 = svc
.list_reports(ListReportsReq {
limit: 2,
offset: 4,
})
.unwrap();
assert_eq!(page3.reports.len(), 1);
}
#[test]
fn unban_nonexistent_returns_false() {
let (_dir, svc) = test_service();
let resp = svc
.unban_user(UnbanUserReq {
identity_key: vec![99u8; 32],
})
.unwrap();
assert!(!resp.success);
}
}
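
A natural consumer of `check_ban` is an admission gate at session setup; a minimal sketch, where reusing `DomainError::BadParams` for the rejection is an assumption (a dedicated variant would be cleaner):

// Sketch only: reject a caller up front if a ban is in effect.
fn admit(svc: &ModerationService, identity_key: &[u8]) -> Result<(), DomainError> {
    if let Some(reason) = svc.check_ban(identity_key)? {
        return Err(DomainError::BadParams(format!("banned: {reason}")));
    }
    Ok(())
}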


@@ -0,0 +1,131 @@
//! Cross-node notification bus for message delivery fan-out.
//!
//! When a message is enqueued, the bus publishes a notification so that
//! any node running a `fetch_wait` long-poll for that recipient can
//! wake up — even if the enqueue happened on a different node.
//!
//! Two backends:
//! - `InMemoryNotificationBus`: single-node, tokio::sync::Notify (default)
//! - Redis pub/sub (feature-gated `redis-pubsub`, implemented externally)
use std::sync::Arc;
use dashmap::DashMap;
use tokio::sync::Notify;
// ── Trait ────────────────────────────────────────────────────────────────────
/// Cross-node notification bus.
///
/// Publishers call `publish` when a message is enqueued.
/// Subscribers call `subscribe` to get a future that resolves when
/// a notification arrives for the given topic.
pub trait NotificationBus: Send + Sync {
/// Notify all waiters for `topic` that new data is available.
fn publish(&self, topic: &[u8]);
/// Return a notification handle for `topic`. Awaiting the handle's
/// `notified()` future completes once a notification arrives for that topic.
fn subscribe(&self, topic: &[u8]) -> Arc<Notify>;
}
// ── In-memory implementation ────────────────────────────────────────────────
/// Single-node notification bus backed by `tokio::sync::Notify`.
///
/// This is the default for single-node deployments. For multi-node,
/// replace with a Redis pub/sub or NATS implementation.
pub struct InMemoryNotificationBus {
waiters: DashMap<Vec<u8>, Arc<Notify>>,
}
impl InMemoryNotificationBus {
pub fn new() -> Self {
Self {
waiters: DashMap::new(),
}
}
}
impl Default for InMemoryNotificationBus {
fn default() -> Self {
Self::new()
}
}
impl NotificationBus for InMemoryNotificationBus {
fn publish(&self, topic: &[u8]) {
if let Some(notify) = self.waiters.get(topic) {
notify.notify_waiters();
}
}
fn subscribe(&self, topic: &[u8]) -> Arc<Notify> {
self.waiters
.entry(topic.to_vec())
.or_insert_with(|| Arc::new(Notify::new()))
.clone()
}
}
/// Create the default notification bus (in-memory, single-node).
pub fn default_notification_bus() -> Arc<dyn NotificationBus> {
Arc::new(InMemoryNotificationBus::new())
}
// ── Tests ───────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
use super::*;
use std::time::Duration;
#[tokio::test]
async fn publish_wakes_subscriber() {
let bus = InMemoryNotificationBus::new();
let topic = b"user:alice";
let notify = bus.subscribe(topic);
let notified = notify.notified();
// Publish from another "node" (same process in this case).
bus.publish(topic);
// Should resolve immediately since we published.
tokio::time::timeout(Duration::from_millis(100), notified)
.await
.expect("notification should arrive");
}
#[tokio::test]
async fn no_publish_times_out() {
let bus = InMemoryNotificationBus::new();
let topic = b"user:bob";
let notify = bus.subscribe(topic);
let notified = notify.notified();
let result = tokio::time::timeout(Duration::from_millis(50), notified).await;
assert!(result.is_err(), "should time out without publish");
}
#[tokio::test]
async fn independent_topics() {
let bus = InMemoryNotificationBus::new();
let notify_a = bus.subscribe(b"topic-a");
let notified_a = notify_a.notified();
let notify_b = bus.subscribe(b"topic-b");
let notified_b = notify_b.notified();
// Only publish to topic-a.
bus.publish(b"topic-a");
tokio::time::timeout(Duration::from_millis(100), notified_a)
.await
.expect("topic-a should wake");
let result = tokio::time::timeout(Duration::from_millis(50), notified_b).await;
assert!(result.is_err(), "topic-b should not wake");
}
}
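
Because `publish` goes through `notify_waiters` (which stores no permit), consumers should create the `notified()` future before inspecting their queue. A hedged sketch of that ordering, with `queue_empty` as a hypothetical predicate supplied by the caller:

// Sketch only: subscribe-then-check avoids a lost wakeup. Tokio completes a
// `Notified` future created before a `notify_waiters` call once it is polled,
// so a publish landing between the two lines below is still observed.
async fn wait_for_topic<F: Fn() -> bool>(bus: &dyn NotificationBus, topic: &[u8], queue_empty: F) {
    let notify = bus.subscribe(topic);
    let notified = notify.notified();
    if queue_empty() {
        notified.await;
    }
}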


@@ -0,0 +1,50 @@
//! P2P endpoint domain logic — publish, resolve, health.
use std::sync::Arc;
use crate::storage::Store;
use super::types::*;
/// Domain service for P2P endpoint management and health checks.
pub struct P2pService {
pub store: Arc<dyn Store>,
}
impl P2pService {
pub fn publish_endpoint(
&self,
req: PublishEndpointReq,
_auth: &CallerAuth,
) -> Result<(), DomainError> {
if req.identity_key.len() != 32 {
return Err(DomainError::InvalidIdentityKey(req.identity_key.len()));
}
self.store
.publish_endpoint(&req.identity_key, req.node_addr)?;
Ok(())
}
pub fn resolve_endpoint(
&self,
req: ResolveEndpointReq,
_auth: &CallerAuth,
) -> Result<ResolveEndpointResp, DomainError> {
if req.identity_key.len() != 32 {
return Err(DomainError::InvalidIdentityKey(req.identity_key.len()));
}
let node_addr = self
.store
.resolve_endpoint(&req.identity_key)?
.unwrap_or_default();
Ok(ResolveEndpointResp { node_addr })
}
pub fn health() -> HealthResp {
HealthResp {
status: "ok".into(),
}
}
}


@@ -0,0 +1,257 @@
//! Distributed rate limiting — sliding window algorithm.
//!
//! Two backends:
//! - `InMemoryRateLimiter`: single-process, DashMap-based (default)
//! - `RedisRateLimiter`: shared across nodes via Redis (feature-gated `redis-ratelimit`)
use std::sync::Arc;
use std::time::{Duration, Instant};
use dashmap::DashMap;
// ── Public types ────────────────────────────────────────────────────────────
/// Result of a rate-limit check.
#[derive(Debug, Clone)]
pub struct RateResult {
/// Whether the request is allowed.
pub allowed: bool,
/// Remaining requests in the current window.
pub remaining: u32,
/// Seconds until a retry can succeed (0 when the request was allowed).
pub retry_after_secs: u32,
}
/// Configuration for a specific rate-limit bucket.
#[derive(Debug, Clone)]
pub struct RateLimitConfig {
/// Maximum number of requests in the window.
pub max_requests: u32,
/// Length of the sliding window.
pub window: Duration,
}
impl Default for RateLimitConfig {
fn default() -> Self {
Self {
max_requests: 100,
window: Duration::from_secs(60),
}
}
}
// ── Trait ────────────────────────────────────────────────────────────────────
/// Abstraction over rate-limit backends.
pub trait RateLimiter: Send + Sync {
/// Check whether `key` is within its rate limit. If allowed, the counter
/// is incremented atomically.
fn check_rate(&self, key: &str, config: &RateLimitConfig) -> RateResult;
}
// ── In-memory sliding window ────────────────────────────────────────────────
/// Per-key state for the sliding window algorithm.
struct SlidingWindow {
/// Timestamps of recent requests within the window.
timestamps: Vec<u64>,
}
/// In-memory rate limiter using a sliding window log.
pub struct InMemoryRateLimiter {
buckets: DashMap<String, SlidingWindow>,
/// Last time we ran GC on expired entries.
last_gc: std::sync::Mutex<Instant>,
}
impl InMemoryRateLimiter {
pub fn new() -> Self {
Self {
buckets: DashMap::new(),
last_gc: std::sync::Mutex::new(Instant::now()),
}
}
fn now_millis() -> u64 {
std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_millis() as u64
}
/// Remove entries whose entire window has expired. Called lazily.
fn gc_if_needed(&self, window: Duration) {
let should_gc = {
let Ok(last) = self.last_gc.lock() else {
return;
};
last.elapsed() > Duration::from_secs(60)
};
if !should_gc {
return;
}
if let Ok(mut last) = self.last_gc.lock() {
*last = Instant::now();
}
let now_ms = Self::now_millis();
let window_ms = window.as_millis() as u64;
self.buckets.retain(|_key, window_state| {
// Keep if any timestamp is within the window.
window_state
.timestamps
.iter()
.any(|&ts| now_ms.saturating_sub(ts) < window_ms)
});
}
}
impl Default for InMemoryRateLimiter {
fn default() -> Self {
Self::new()
}
}
impl RateLimiter for InMemoryRateLimiter {
fn check_rate(&self, key: &str, config: &RateLimitConfig) -> RateResult {
let now_ms = Self::now_millis();
let window_ms = config.window.as_millis() as u64;
self.gc_if_needed(config.window);
let mut entry = self.buckets.entry(key.to_string()).or_insert(SlidingWindow {
timestamps: Vec::new(),
});
// Evict timestamps outside the sliding window.
let cutoff = now_ms.saturating_sub(window_ms);
entry.timestamps.retain(|&ts| ts > cutoff);
let count = entry.timestamps.len() as u32;
if count >= config.max_requests {
// Find earliest timestamp to compute retry-after.
let earliest = entry.timestamps.iter().copied().min().unwrap_or(now_ms);
let retry_after_ms = (earliest + window_ms).saturating_sub(now_ms);
return RateResult {
allowed: false,
remaining: 0,
retry_after_secs: (retry_after_ms / 1000).max(1) as u32,
};
}
entry.timestamps.push(now_ms);
let remaining = config.max_requests.saturating_sub(count + 1);
RateResult {
allowed: true,
remaining,
retry_after_secs: 0,
}
}
}
/// Create the default rate limiter (in-memory).
pub fn default_rate_limiter() -> Arc<dyn RateLimiter> {
Arc::new(InMemoryRateLimiter::new())
}
// ── Tests ───────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn allows_within_limit() {
let limiter = InMemoryRateLimiter::new();
let config = RateLimitConfig {
max_requests: 3,
window: Duration::from_secs(60),
};
for _ in 0..3 {
let result = limiter.check_rate("user1", &config);
assert!(result.allowed);
}
}
#[test]
fn blocks_over_limit() {
let limiter = InMemoryRateLimiter::new();
let config = RateLimitConfig {
max_requests: 2,
window: Duration::from_secs(60),
};
assert!(limiter.check_rate("user1", &config).allowed);
assert!(limiter.check_rate("user1", &config).allowed);
let result = limiter.check_rate("user1", &config);
assert!(!result.allowed);
assert_eq!(result.remaining, 0);
assert!(result.retry_after_secs > 0);
}
#[test]
fn independent_keys() {
let limiter = InMemoryRateLimiter::new();
let config = RateLimitConfig {
max_requests: 1,
window: Duration::from_secs(60),
};
assert!(limiter.check_rate("user1", &config).allowed);
assert!(!limiter.check_rate("user1", &config).allowed);
// Different key should still be allowed.
assert!(limiter.check_rate("user2", &config).allowed);
}
#[test]
fn remaining_decreases() {
let limiter = InMemoryRateLimiter::new();
let config = RateLimitConfig {
max_requests: 5,
window: Duration::from_secs(60),
};
let r1 = limiter.check_rate("user1", &config);
assert_eq!(r1.remaining, 4);
let r2 = limiter.check_rate("user1", &config);
assert_eq!(r2.remaining, 3);
}
#[test]
fn concurrent_access_is_safe() {
use std::sync::Arc;
use std::thread;
let limiter = Arc::new(InMemoryRateLimiter::new());
let config = RateLimitConfig {
max_requests: 1000,
window: Duration::from_secs(60),
};
let handles: Vec<_> = (0..10)
.map(|_| {
let l = Arc::clone(&limiter);
let c = config.clone();
thread::spawn(move || {
for _ in 0..100 {
l.check_rate("shared_key", &c);
}
})
})
.collect();
for h in handles {
h.join().expect("thread panicked");
}
// After 1000 requests exactly, next should be blocked.
let result = limiter.check_rate("shared_key", &config);
assert!(!result.allowed);
}
}
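
Wiring this into a handler is a one-liner per operation; a sketch with an assumed key scheme and budget:

// Sketch only: gate an operation per identity. The "enqueue:<hex>" key
// scheme and the 30-per-minute budget are assumptions for illustration.
fn gate_enqueue(limiter: &dyn RateLimiter, identity_hex: &str) -> Result<(), String> {
    let config = RateLimitConfig {
        max_requests: 30,
        window: std::time::Duration::from_secs(60),
    };
    let result = limiter.check_rate(&format!("enqueue:{identity_hex}"), &config);
    if result.allowed {
        Ok(())
    } else {
        Err(format!("rate limited; retry in {}s", result.retry_after_secs))
    }
}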


@@ -0,0 +1,76 @@
//! Recovery domain logic — encrypted recovery bundle CRUD.
use std::sync::Arc;
use crate::storage::Store;
use super::types::DomainError;
/// Maximum recovery bundle size: 64 KiB.
const MAX_BUNDLE_SIZE: usize = 64 * 1024;
/// Default TTL for recovery bundles: 90 days.
pub const DEFAULT_TTL_SECS: u64 = 90 * 24 * 60 * 60;
/// Domain service for recovery bundle operations.
pub struct RecoveryService {
pub store: Arc<dyn Store>,
}
impl RecoveryService {
/// Store an encrypted recovery bundle.
///
/// `token_hash` is the SHA-256 of a recovery token derived from the code.
/// `bundle` is the encrypted blob (opaque to server).
/// `ttl_secs` is the time-to-live; 0 uses the default (90 days).
pub fn store_bundle(
&self,
token_hash: &[u8],
bundle: Vec<u8>,
ttl_secs: u64,
) -> Result<(), DomainError> {
if token_hash.len() != 32 {
return Err(DomainError::BadParams(format!(
"token_hash must be 32 bytes, got {}",
token_hash.len()
)));
}
if bundle.is_empty() {
return Err(DomainError::BadParams("recovery bundle must not be empty".into()));
}
if bundle.len() > MAX_BUNDLE_SIZE {
return Err(DomainError::BadParams(format!(
"recovery bundle exceeds max size ({} > {MAX_BUNDLE_SIZE})",
bundle.len()
)));
}
let ttl = if ttl_secs == 0 { DEFAULT_TTL_SECS } else { ttl_secs };
self.store.store_recovery_bundle(token_hash, bundle, ttl)?;
Ok(())
}
/// Fetch an encrypted recovery bundle by token_hash.
pub fn fetch_bundle(&self, token_hash: &[u8]) -> Result<Option<Vec<u8>>, DomainError> {
if token_hash.len() != 32 {
return Err(DomainError::BadParams(format!(
"token_hash must be 32 bytes, got {}",
token_hash.len()
)));
}
let bundle = self.store.get_recovery_bundle(token_hash)?;
Ok(bundle)
}
/// Delete an encrypted recovery bundle by token_hash.
pub fn delete_bundle(&self, token_hash: &[u8]) -> Result<bool, DomainError> {
if token_hash.len() != 32 {
return Err(DomainError::BadParams(format!(
"token_hash must be 32 bytes, got {}",
token_hash.len()
)));
}
let deleted = self.store.delete_recovery_bundle(token_hash)?;
Ok(deleted)
}
}
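
Callers are expected to hash the recovery token before it ever reaches the server; a minimal sketch (deriving the token itself, e.g. via a KDF over the recovery code, happens elsewhere):

// Sketch only: derive the 32-byte token_hash the service requires and store
// a bundle under the default 90-day TTL.
use sha2::{Digest, Sha256};

fn store_for_token(
    svc: &RecoveryService,
    recovery_token: &[u8],
    bundle: Vec<u8>,
) -> Result<(), DomainError> {
    let token_hash: Vec<u8> = Sha256::digest(recovery_token).to_vec();
    svc.store_bundle(&token_hash, bundle, 0) // ttl_secs = 0 selects DEFAULT_TTL_SECS
}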


@@ -0,0 +1,249 @@
//! Traffic analysis resistance — decoy traffic generation and timing jitter.
//!
//! When enabled (via the `traffic-resistance` feature), the server:
//!
//! 1. Pads all enqueued payloads to a uniform boundary using [`quicprochat_core::padding::pad_uniform`].
//! 2. Injects random jitter delays before enqueue responses to mask timing patterns.
//! 3. Runs a background decoy traffic generator that enqueues fake encrypted messages
//! at a configurable rate to connected recipients.
//!
//! Decoy messages are indistinguishable from real padded messages on the wire.
//! Recipients detect and discard them by unpadding to an empty payload.
use std::sync::Arc;
use rand::Rng;
use tokio::sync::Notify;
use super::delivery::DeliveryService;
use super::types::EnqueueReq;
/// Configuration for traffic analysis resistance.
#[derive(Clone, Debug)]
pub struct TrafficResistanceConfig {
/// Padding boundary in bytes (default 256). All enqueued payloads are
/// padded to the nearest multiple of this value.
pub padding_boundary: usize,
/// Mean interval in milliseconds between decoy messages per recipient.
/// Set to 0 to disable decoy traffic.
pub decoy_interval_ms: u64,
/// Maximum random jitter in milliseconds added before enqueue responses.
/// Set to 0 to disable jitter.
pub jitter_max_ms: u64,
}
impl Default for TrafficResistanceConfig {
fn default() -> Self {
Self {
padding_boundary: quicprochat_core::padding::DEFAULT_PADDING_BOUNDARY,
decoy_interval_ms: 5000,
jitter_max_ms: 50,
}
}
}
/// Pad a payload to the configured uniform boundary.
pub fn pad_payload(payload: &[u8], config: &TrafficResistanceConfig) -> Vec<u8> {
quicprochat_core::padding::pad_uniform(payload, config.padding_boundary)
}
/// Apply random jitter delay to mask timing patterns.
///
/// Sleeps for a random duration in `[0, config.jitter_max_ms)` milliseconds.
/// Does nothing if `jitter_max_ms` is 0.
pub async fn apply_jitter(config: &TrafficResistanceConfig) {
if config.jitter_max_ms == 0 {
return;
}
let jitter_ms = rand::thread_rng().gen_range(0..config.jitter_max_ms);
if jitter_ms > 0 {
tokio::time::sleep(std::time::Duration::from_millis(jitter_ms)).await;
}
}
/// Spawn a background task that generates decoy traffic.
///
/// Sends decoy messages to the provided `recipient_keys` at random intervals
/// around `config.decoy_interval_ms`. The task runs until `shutdown` is notified.
///
/// Returns a `JoinHandle` for the spawned task.
pub fn spawn_decoy_generator(
delivery: Arc<DeliveryService>,
recipient_keys: Vec<Vec<u8>>,
channel_id: Vec<u8>,
config: TrafficResistanceConfig,
shutdown: Arc<Notify>,
) -> tokio::task::JoinHandle<()> {
tokio::spawn(async move {
if config.decoy_interval_ms == 0 || recipient_keys.is_empty() {
// Decoy traffic disabled or no recipients — wait for shutdown.
shutdown.notified().await;
return;
}
let base_interval = std::time::Duration::from_millis(config.decoy_interval_ms);
loop {
// Randomize interval: 50-150% of base to avoid periodic patterns.
let jitter_factor: f64 = rand::thread_rng().gen_range(0.5..1.5);
let interval = base_interval.mul_f64(jitter_factor);
tokio::select! {
() = tokio::time::sleep(interval) => {}
() = shutdown.notified() => {
tracing::debug!("decoy traffic generator shutting down");
return;
}
}
// Pick a random recipient.
let idx = rand::thread_rng().gen_range(0..recipient_keys.len());
let recipient_key = &recipient_keys[idx];
// Generate a decoy payload that is indistinguishable from a real padded message.
let decoy = quicprochat_core::padding::generate_decoy(config.padding_boundary);
let req = EnqueueReq {
recipient_key: recipient_key.clone(),
payload: decoy,
channel_id: channel_id.clone(),
ttl_secs: 60, // Short TTL for decoys.
};
match delivery.enqueue(req) {
Ok(_) => {
tracing::trace!("decoy message injected");
}
Err(e) => {
tracing::warn!(error = %e, "failed to inject decoy message");
}
}
}
})
}
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
use super::*;
use crate::storage::FileBackedStore;
use dashmap::DashMap;
fn test_delivery() -> (tempfile::TempDir, Arc<DeliveryService>) {
let dir = tempfile::tempdir().unwrap();
let store = Arc::new(FileBackedStore::open(dir.path()).unwrap());
let svc = Arc::new(DeliveryService {
store,
waiters: Arc::new(DashMap::new()),
});
(dir, svc)
}
#[test]
fn pad_payload_is_boundary_aligned() {
let config = TrafficResistanceConfig {
padding_boundary: 256,
..Default::default()
};
let payload = b"test message";
let padded = pad_payload(payload, &config);
assert_eq!(padded.len() % 256, 0);
// Unpad should recover original.
let unpadded = quicprochat_core::padding::unpad_uniform(&padded).unwrap();
assert_eq!(unpadded, payload);
}
#[test]
fn pad_payload_custom_boundary() {
let config = TrafficResistanceConfig {
padding_boundary: 512,
..Default::default()
};
let payload = vec![0xAA; 300];
let padded = pad_payload(&payload, &config);
assert_eq!(padded.len() % 512, 0);
assert_eq!(padded.len(), 512);
}
#[tokio::test]
async fn jitter_zero_is_noop() {
let config = TrafficResistanceConfig {
jitter_max_ms: 0,
..Default::default()
};
let start = std::time::Instant::now();
apply_jitter(&config).await;
// Should return almost immediately.
assert!(start.elapsed() < std::time::Duration::from_millis(5));
}
#[tokio::test]
async fn decoy_generator_produces_messages() {
let (_dir, delivery) = test_delivery();
let recipient = vec![0xFFu8; 32];
let channel = vec![0u8; 16];
let shutdown = Arc::new(Notify::new());
let config = TrafficResistanceConfig {
padding_boundary: 256,
decoy_interval_ms: 50, // Fast interval for testing.
jitter_max_ms: 0,
};
let handle = spawn_decoy_generator(
Arc::clone(&delivery),
vec![recipient.clone()],
channel.clone(),
config,
Arc::clone(&shutdown),
);
// Wait enough time for at least one decoy.
tokio::time::sleep(std::time::Duration::from_millis(200)).await;
shutdown.notify_one();
handle.await.unwrap();
// Check that decoy messages were enqueued.
let fetched = delivery
.fetch(super::super::types::FetchReq {
recipient_key: recipient,
channel_id: channel,
limit: 100,
})
.unwrap();
assert!(!fetched.payloads.is_empty(), "decoy generator should have enqueued at least one message");
// Every decoy should unpad to an empty payload.
for env in &fetched.payloads {
let unpadded = quicprochat_core::padding::unpad_uniform(&env.data).unwrap();
assert!(unpadded.is_empty(), "decoy payload should unpad to empty");
}
}
#[tokio::test]
async fn decoy_generator_disabled_when_zero_interval() {
let (_dir, delivery) = test_delivery();
let shutdown = Arc::new(Notify::new());
let config = TrafficResistanceConfig {
decoy_interval_ms: 0,
..Default::default()
};
let handle = spawn_decoy_generator(
delivery,
vec![vec![1u8; 32]],
vec![0u8; 16],
config,
Arc::clone(&shutdown),
);
// Signal shutdown immediately — should return without having sent anything.
shutdown.notify_one();
handle.await.unwrap();
}
}
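
On the receiving side, decoy filtering is the mirror image of the generator above; a minimal sketch:

// Sketch only: recipients unpad every envelope and silently drop payloads
// that unpad to empty, which is exactly how decoys are marked.
fn strip_decoys(envelopes: Vec<super::types::Envelope>) -> Vec<Vec<u8>> {
    envelopes
        .into_iter()
        .filter_map(|env| quicprochat_core::padding::unpad_uniform(&env.data).ok())
        .filter(|payload| !payload.is_empty())
        .collect()
}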


@@ -0,0 +1,441 @@
//! Plain Rust request/response types for server domain logic.
//!
//! No proto, no capnp — just Rust structs.
use crate::storage::StorageError;
// ── Domain Error ────────────────────────────────────────────────────────────
/// Errors returned by domain service methods.
#[derive(thiserror::Error, Debug)]
pub enum DomainError {
#[error("identity key must be exactly 32 bytes, got {0}")]
InvalidIdentityKey(usize),
#[error("key package must not be empty")]
EmptyPackage,
#[error("key package exceeds max size ({0} bytes)")]
PackageTooLarge(usize),
#[error("hybrid public key must not be empty")]
EmptyHybridKey,
#[error("username must not be empty")]
EmptyUsername,
#[error("blob hash must be exactly 32 bytes, got {0}")]
BlobHashLength(usize),
#[error("blob exceeds max size ({0} bytes)")]
BlobTooLarge(u64),
#[error("SHA-256 of uploaded data does not match blob hash")]
BlobHashMismatch,
#[error("blob not found")]
BlobNotFound,
#[error("maximum {0} devices per identity")]
DeviceLimit(usize),
#[error("device not found")]
DeviceNotFound,
#[error("group not found")]
GroupNotFound,
#[error("bad parameters: {0}")]
BadParams(String),
#[error("I/O error: {0}")]
Io(String),
#[error("storage error: {0}")]
Storage(#[from] StorageError),
}
// ── Auth ─────────────────────────────────────────────────────────────────────
/// Caller authentication context (resolved from session token).
#[derive(Debug, Clone)]
pub struct CallerAuth {
/// Ed25519 identity key of the authenticated caller (32 bytes).
pub identity_key: Vec<u8>,
/// Session token bytes.
pub token: Vec<u8>,
/// Device ID (optional, for auditing).
pub device_id: Option<Vec<u8>>,
}
/// OPAQUE registration start.
pub struct RegisterStartReq {
pub username: String,
pub request_bytes: Vec<u8>,
}
pub struct RegisterStartResp {
pub response_bytes: Vec<u8>,
}
/// OPAQUE registration finish.
pub struct RegisterFinishReq {
pub username: String,
pub upload_bytes: Vec<u8>,
pub identity_key: Vec<u8>,
}
pub struct RegisterFinishResp {
pub success: bool,
}
/// OPAQUE login start.
pub struct LoginStartReq {
pub username: String,
pub request_bytes: Vec<u8>,
}
pub struct LoginStartResp {
pub response_bytes: Vec<u8>,
}
/// OPAQUE login finish.
pub struct LoginFinishReq {
pub username: String,
pub finalization_bytes: Vec<u8>,
pub identity_key: Vec<u8>,
}
pub struct LoginFinishResp {
pub session_token: Vec<u8>,
}
// ── Delivery ─────────────────────────────────────────────────────────────────
/// An envelope pairing a sequence number with an opaque payload.
#[derive(Debug, Clone)]
pub struct Envelope {
pub seq: u64,
pub data: Vec<u8>,
}
pub struct EnqueueReq {
pub recipient_key: Vec<u8>,
pub payload: Vec<u8>,
pub channel_id: Vec<u8>,
pub ttl_secs: u32,
}
pub struct EnqueueResp {
pub seq: u64,
pub delivery_proof: Vec<u8>,
}
pub struct FetchReq {
pub recipient_key: Vec<u8>,
pub channel_id: Vec<u8>,
pub limit: u32,
}
pub struct FetchResp {
pub payloads: Vec<Envelope>,
}
pub struct PeekReq {
pub recipient_key: Vec<u8>,
pub channel_id: Vec<u8>,
pub limit: u32,
}
pub struct PeekResp {
pub payloads: Vec<Envelope>,
}
pub struct AckReq {
pub recipient_key: Vec<u8>,
pub channel_id: Vec<u8>,
pub seq_up_to: u64,
}
pub struct BatchEnqueueReq {
pub recipient_keys: Vec<Vec<u8>>,
pub payload: Vec<u8>,
pub channel_id: Vec<u8>,
pub ttl_secs: u32,
}
pub struct BatchEnqueueResp {
pub seqs: Vec<u64>,
}
// ── Keys ─────────────────────────────────────────────────────────────────────
pub struct UploadKeyPackageReq {
pub identity_key: Vec<u8>,
pub package: Vec<u8>,
}
pub struct UploadKeyPackageResp {
pub fingerprint: Vec<u8>,
}
pub struct FetchKeyPackageReq {
pub identity_key: Vec<u8>,
}
pub struct FetchKeyPackageResp {
pub package: Vec<u8>,
}
pub struct UploadHybridKeyReq {
pub identity_key: Vec<u8>,
pub hybrid_public_key: Vec<u8>,
}
pub struct FetchHybridKeyReq {
pub identity_key: Vec<u8>,
}
pub struct FetchHybridKeyResp {
pub hybrid_public_key: Vec<u8>,
}
pub struct FetchHybridKeysReq {
pub identity_keys: Vec<Vec<u8>>,
}
pub struct FetchHybridKeysResp {
pub keys: Vec<Vec<u8>>,
}
// ── Key Transparency / Revocation ────────────────────────────────────────────
pub struct RevokeKeyReq {
pub identity_key: Vec<u8>,
pub reason: String,
}
pub struct RevokeKeyResp {
pub success: bool,
pub leaf_index: u64,
}
pub struct CheckRevocationReq {
pub identity_key: Vec<u8>,
}
pub struct CheckRevocationResp {
pub revoked: bool,
pub reason: String,
pub timestamp_ms: u64,
}
/// Request KT log entries from `start` to `end`; `end == 0` is treated
/// server-side as "up to the current tree size".
pub struct AuditKeyTransparencyReq {
pub start: u64,
pub end: u64,
}
pub struct AuditLogEntry {
pub index: u64,
pub leaf_hash: Vec<u8>,
}
pub struct AuditKeyTransparencyResp {
pub entries: Vec<AuditLogEntry>,
pub tree_size: u64,
pub root: Vec<u8>,
}
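// Illustrative client-side usage, not part of this commit: a full-log
// audit pass using the `end == 0` convention, with `audit` standing in
// for whatever transport call serves AuditKeyTransparencyReq. The check
// is a bare sanity test, not a Merkle consistency proof.
#[allow(dead_code)]
fn audit_sanity_check<F>(mut audit: F) -> Result<bool, DomainError>
where
    F: FnMut(AuditKeyTransparencyReq) -> Result<AuditKeyTransparencyResp, DomainError>,
{
    let resp = audit(AuditKeyTransparencyReq { start: 0, end: 0 })?;
    // Every leaf from 0..tree_size should be present and the root non-empty.
    Ok(resp.entries.len() as u64 == resp.tree_size && !resp.root.is_empty())
}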
// ── Channel ──────────────────────────────────────────────────────────────────
pub struct CreateChannelReq {
pub peer_key: Vec<u8>,
}
pub struct CreateChannelResp {
pub channel_id: Vec<u8>,
pub was_new: bool,
}
// ── User ─────────────────────────────────────────────────────────────────────
pub struct ResolveUserReq {
pub username: String,
}
pub struct ResolveUserResp {
pub identity_key: Vec<u8>,
pub inclusion_proof: Vec<u8>,
}
pub struct ResolveIdentityReq {
pub identity_key: Vec<u8>,
}
pub struct ResolveIdentityResp {
pub username: String,
}
// ── Blob ─────────────────────────────────────────────────────────────────────
/// One chunk of a blob upload. `blob_hash` is the SHA-256 of the complete
/// blob (32 bytes), used to verify the reassembled data.
pub struct UploadBlobReq {
pub blob_hash: Vec<u8>,
pub chunk: Vec<u8>,
pub offset: u64,
pub total_size: u64,
pub mime_type: String,
}
pub struct UploadBlobResp {
pub blob_id: Vec<u8>,
}
pub struct DownloadBlobReq {
pub blob_id: Vec<u8>,
pub offset: u64,
pub length: u32,
}
pub struct DownloadBlobResp {
pub chunk: Vec<u8>,
pub total_size: u64,
pub mime_type: String,
}
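// Illustrative only: how a client is expected to drive the chunked-upload
// fields above. `send` stands in for the actual transport call, and the
// 64 KiB chunk size is an arbitrary choice for this sketch.
#[allow(dead_code)]
fn upload_blob_chunks<F>(
    mut send: F,
    blob_hash: [u8; 32],
    data: &[u8],
    mime_type: &str,
) -> Result<Vec<u8>, DomainError>
where
    F: FnMut(UploadBlobReq) -> Result<UploadBlobResp, DomainError>,
{
    const CHUNK: usize = 64 * 1024;
    let mut blob_id = Vec::new();
    for (i, chunk) in data.chunks(CHUNK).enumerate() {
        let resp = send(UploadBlobReq {
            blob_hash: blob_hash.to_vec(),
            chunk: chunk.to_vec(),
            offset: (i * CHUNK) as u64,
            total_size: data.len() as u64,
            mime_type: mime_type.to_string(),
        })?;
        blob_id = resp.blob_id;
    }
    Ok(blob_id)
}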
// ── Device ───────────────────────────────────────────────────────────────────
pub struct RegisterDeviceReq {
pub device_id: Vec<u8>,
pub device_name: String,
}
pub struct RegisterDeviceResp {
pub success: bool,
}
pub struct DeviceInfo {
pub device_id: Vec<u8>,
pub device_name: String,
pub registered_at: u64,
}
pub struct ListDevicesResp {
pub devices: Vec<DeviceInfo>,
}
pub struct RevokeDeviceReq {
pub device_id: Vec<u8>,
}
pub struct RevokeDeviceResp {
pub success: bool,
}
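// Illustrative only: the registration-time check implied by
// DomainError::DeviceLimit. The cap of 8 is an assumed placeholder, not a
// value defined by this commit.
#[allow(dead_code)]
fn check_device_limit(existing: &[DeviceInfo]) -> Result<(), DomainError> {
    const MAX_DEVICES: usize = 8; // assumed cap
    if existing.len() >= MAX_DEVICES {
        return Err(DomainError::DeviceLimit(MAX_DEVICES));
    }
    Ok(())
}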
// ── Group metadata ───────────────────────────────────────────────────────────
pub struct GroupMetadata {
pub group_id: Vec<u8>,
pub name: String,
pub description: String,
pub avatar_hash: Vec<u8>,
pub creator_key: Vec<u8>,
pub created_at: u64,
}
pub struct UpdateGroupMetadataReq {
pub group_id: Vec<u8>,
pub name: String,
pub description: String,
pub avatar_hash: Vec<u8>,
}
pub struct ListGroupMembersReq {
pub group_id: Vec<u8>,
}
pub struct GroupMemberInfo {
pub identity_key: Vec<u8>,
pub username: String,
pub joined_at: u64,
}
pub struct ListGroupMembersResp {
pub members: Vec<GroupMemberInfo>,
}
// ── Moderation ───────────────────────────────────────────────────────────────
pub struct ReportMessageReq {
pub encrypted_report: Vec<u8>,
pub conversation_id: Vec<u8>,
pub reporter_identity: Vec<u8>,
}
pub struct ReportMessageResp {
pub accepted: bool,
}
pub struct BanUserReq {
pub identity_key: Vec<u8>,
pub reason: String,
pub duration_secs: u64,
}
pub struct BanUserResp {
pub success: bool,
}
pub struct UnbanUserReq {
pub identity_key: Vec<u8>,
}
pub struct UnbanUserResp {
pub success: bool,
}
pub struct ListReportsReq {
pub limit: u32,
pub offset: u32,
}
pub struct ReportEntry {
pub id: u64,
pub encrypted_report: Vec<u8>,
pub conversation_id: Vec<u8>,
pub reporter_identity: Vec<u8>,
pub timestamp: u64,
}
pub struct ListReportsResp {
pub reports: Vec<ReportEntry>,
}
pub struct BannedUserEntry {
pub identity_key: Vec<u8>,
pub reason: String,
pub banned_at: u64,
pub expires_at: u64,
}
pub struct ListBannedResp {
pub users: Vec<BannedUserEntry>,
}
// ── P2P ──────────────────────────────────────────────────────────────────────
pub struct PublishEndpointReq {
pub identity_key: Vec<u8>,
pub node_addr: Vec<u8>,
}
pub struct ResolveEndpointReq {
pub identity_key: Vec<u8>,
}
pub struct ResolveEndpointResp {
pub node_addr: Vec<u8>,
}
pub struct HealthResp {
pub status: String,
}
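// Illustrative only: a coarse split a transport layer might use when
// mapping DomainError onto a wire status. Treating everything except I/O
// and storage failures as the caller's fault is an assumption, not
// something this commit defines.
#[allow(dead_code)]
pub fn is_caller_fault(err: &DomainError) -> bool {
    !matches!(err, DomainError::Io(_) | DomainError::Storage(_))
}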

View File

@@ -0,0 +1,146 @@
//! User resolution domain logic — username <-> identity key lookups,
//! key revocation, and Key Transparency audit.
use std::sync::{Arc, Mutex};
use quicprochat_kt::{MerkleLog, RevocationLog, RevocationReason};
use crate::storage::Store;
use super::types::*;
/// Domain service for user/identity resolution.
pub struct UserService {
pub store: Arc<dyn Store>,
pub kt_log: Arc<Mutex<MerkleLog>>,
pub revocation_log: Arc<Mutex<RevocationLog>>,
}
impl UserService {
    /// Resolve a username to its identity key, attaching a KT inclusion
    /// proof when one can be produced. Returns an empty `identity_key`
    /// when the username is unknown.
    pub fn resolve_user(&self, req: ResolveUserReq) -> Result<ResolveUserResp, DomainError> {
if req.username.is_empty() {
return Err(DomainError::EmptyUsername);
}
let identity_key = self
.store
.get_user_identity_key(&req.username)?
.unwrap_or_default();
let mut inclusion_proof = Vec::new();
if !identity_key.is_empty() {
if let Ok(log) = self.kt_log.lock() {
if let Some(leaf_idx) = log.find(&req.username, &identity_key) {
if let Ok(proof) = log.inclusion_proof(leaf_idx) {
if let Ok(bytes) = proof.to_bytes() {
inclusion_proof = bytes;
}
}
}
}
}
Ok(ResolveUserResp {
identity_key,
inclusion_proof,
})
}
pub fn resolve_identity(
&self,
req: ResolveIdentityReq,
) -> Result<ResolveIdentityResp, DomainError> {
if req.identity_key.len() != 32 {
return Err(DomainError::InvalidIdentityKey(req.identity_key.len()));
}
let username = self
.store
.resolve_identity_key(&req.identity_key)?
.unwrap_or_default();
Ok(ResolveIdentityResp { username })
}
/// Revoke an identity key in the Key Transparency log.
pub fn revoke_key(&self, req: RevokeKeyReq) -> Result<RevokeKeyResp, DomainError> {
if req.identity_key.len() != 32 {
return Err(DomainError::InvalidIdentityKey(req.identity_key.len()));
}
let reason = RevocationReason::from_tag(&req.reason)
.ok_or_else(|| DomainError::BadParams(format!("invalid revocation reason: {}", req.reason)))?;
let timestamp_ms = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.map(|d| d.as_millis() as u64)
.unwrap_or(0);
let mut kt = self.kt_log.lock().map_err(|e| DomainError::Io(e.to_string()))?;
let mut revlog = self.revocation_log.lock().map_err(|e| DomainError::Io(e.to_string()))?;
let leaf_index = revlog
.revoke(&mut kt, &req.identity_key, reason, timestamp_ms)
.map_err(|e| DomainError::BadParams(e.to_string()))?;
        // Persist updated logs. Errors are swallowed here, so a persistence
        // failure does not undo the in-memory revocation or the success reply.
if let Ok(bytes) = kt.to_bytes() {
let _ = self.store.save_kt_log(bytes);
}
if let Ok(bytes) = revlog.to_bytes() {
let _ = self.store.save_revocation_log(bytes);
}
Ok(RevokeKeyResp {
success: true,
leaf_index,
})
}
/// Check if an identity key has been revoked.
pub fn check_revocation(&self, req: CheckRevocationReq) -> Result<CheckRevocationResp, DomainError> {
let revlog = self.revocation_log.lock().map_err(|e| DomainError::Io(e.to_string()))?;
if let Some(entry) = revlog.get(&req.identity_key) {
Ok(CheckRevocationResp {
revoked: true,
reason: entry.reason.as_tag().to_string(),
timestamp_ms: entry.timestamp_ms,
})
} else {
Ok(CheckRevocationResp {
revoked: false,
reason: String::new(),
timestamp_ms: 0,
})
}
}
/// Return a range of KT log entries for client-side audit.
pub fn audit_key_transparency(
&self,
req: AuditKeyTransparencyReq,
) -> Result<AuditKeyTransparencyResp, DomainError> {
let kt = self.kt_log.lock().map_err(|e| DomainError::Io(e.to_string()))?;
        // `end == 0` means "up to the current tree size"; clamp explicit ends
        // so an out-of-range request cannot read past the log.
        let end = if req.end == 0 { kt.len() } else { req.end.min(kt.len()) };
let log_entries = kt.audit_log(req.start, end);
let entries: Vec<AuditLogEntry> = log_entries
.into_iter()
.map(|(index, hash)| AuditLogEntry {
index,
leaf_hash: hash.to_vec(),
})
.collect();
let tree_size = kt.len();
let root = kt.root().map(|r| r.to_vec()).unwrap_or_default();
Ok(AuditKeyTransparencyResp {
entries,
tree_size,
root,
})
}
}
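// Illustrative flow, not part of this commit: revoke a key, then confirm
// the revocation is visible through `check_revocation`. The "compromised"
// tag is a guess; the accepted tags are defined by RevocationReason in
// quicprochat_kt, not in this file.
#[allow(dead_code)]
fn revoke_and_verify(svc: &UserService, key: &[u8]) -> Result<bool, DomainError> {
    svc.revoke_key(RevokeKeyReq {
        identity_key: key.to_vec(),
        reason: "compromised".to_string(),
    })?;
    let check = svc.check_revocation(CheckRevocationReq {
        identity_key: key.to_vec(),
    })?;
    Ok(check.revoked)
}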