feat: DM epoch fix, federation relay, and mDNS mesh discovery

- schema: createChannel returns wasNew :Bool to elect the MLS initiator
  unambiguously; prevents duplicate group creation on concurrent /dm calls
- core: group helpers for epoch tracking and key-package lifecycle
- server: federation subsystem — mTLS QUIC server-to-server relay with
  Cap'n Proto RPC; enqueue/batchEnqueue relay messages for non-local
  recipients to their home domain via FederationClient
- server: mDNS _quicproquo._udp.local. service announcement on startup
- server: storage + sql_store — identity_exists, peek/ack, federation
  home-server lookup helpers
- client: /mesh peers REPL command (mDNS discovery, feature = "mesh")
- client: MeshDiscovery — background mDNS browse with ServiceDaemon
- client: was_new=false path in cmd_dm waits for peer Welcome instead of
  creating a duplicate initiator group
- p2p: fix ALPN from quicnprotochat/p2p/1 → quicproquo/p2p/1
- workspace: re-include quicproquo-p2p in members
This commit is contained in:
2026-03-03 14:41:56 +01:00
parent e24497bf90
commit c8398d6cb7
27 changed files with 3375 additions and 303 deletions

View File

@@ -100,6 +100,42 @@ impl NodeServiceImpl {
}
}
// Federation routing: if the recipient's home server differs from ours, relay the
// message to the remote server instead of enqueueing locally. This enables
// cross-node delivery in a Freifunk / community mesh deployment.
if let (Some(fed_client), Some(local_domain)) =
(&self.federation_client, &self.local_domain)
{
let dest = crate::federation::routing::resolve_destination(
&self.store,
&recipient_key,
local_domain,
);
if let crate::federation::routing::Destination::Remote(remote_domain) = dest {
let fed = Arc::clone(fed_client);
let rk = recipient_key;
let pl = payload;
let ch = channel_id;
tracing::info!(
recipient_prefix = %fmt_hex(&rk[..4]),
domain = %remote_domain,
"federation: routing enqueue to remote server"
);
return Promise::from_future(async move {
let seq = fed
.relay_enqueue(&remote_domain, &rk, &pl, &ch)
.await
.map_err(|e| {
capnp::Error::failed(format!("federation relay failed: {e}"))
})?;
results.get().set_seq(seq);
metrics::record_enqueue_total();
metrics::record_enqueue_bytes(pl.len() as u64);
Ok(())
});
}
}
// DM channel authz: channel_id.len() == 16 means a created channel; caller and recipient must be the two members.
if channel_id.len() == 16 {
let members = match self.store.get_channel_members(&channel_id) {
@@ -591,7 +627,8 @@ impl NodeServiceImpl {
}
}
let mut seqs = Vec::with_capacity(recipient_keys.len() as usize);
// Eagerly collect recipient keys so params can be dropped before any async work.
let mut recipient_key_vecs: Vec<Vec<u8>> = Vec::with_capacity(recipient_keys.len() as usize);
for i in 0..recipient_keys.len() {
let rk = match recipient_keys.get(i) {
Ok(v) => v.to_vec(),
@@ -604,7 +641,7 @@ impl NodeServiceImpl {
));
}
// Per-recipient DM channel membership check.
// Per-recipient DM channel membership check (only when channel_id is a 16-byte UUID).
if channel_id.len() == 16 {
let members = match self.store.get_channel_members(&channel_id) {
Ok(Some(m)) => m,
@@ -631,44 +668,79 @@ impl NodeServiceImpl {
}
}
match self.store.queue_depth(&rk, &channel_id) {
Ok(depth) if depth >= MAX_QUEUE_DEPTH => {
return Promise::err(coded_error(
E015_QUEUE_FULL,
format!("queue depth {} exceeds limit {}", depth, MAX_QUEUE_DEPTH),
));
}
Err(e) => return Promise::err(storage_err(e)),
_ => {}
recipient_key_vecs.push(rk);
}
let n = recipient_key_vecs.len();
let store = Arc::clone(&self.store);
let waiters = Arc::clone(&self.waiters);
let fed_client = self.federation_client.clone();
let local_domain = self.local_domain.clone();
// Use an async future to support federation relay alongside local enqueue.
// All storage operations are synchronous; only federation relay calls are await-ed.
Promise::from_future(async move {
let mut seqs = Vec::with_capacity(n);
for rk in &recipient_key_vecs {
// Federation routing: relay to the recipient's home server when remote.
let dest = if let (Some(ref _fed), Some(ref domain)) = (&fed_client, &local_domain) {
crate::federation::routing::resolve_destination(&store, rk, domain)
} else {
crate::federation::routing::Destination::Local
};
let seq = match dest {
crate::federation::routing::Destination::Remote(ref remote_domain) => {
let fed = fed_client.as_deref().ok_or_else(|| {
capnp::Error::failed("federation client unavailable for remote routing".into())
})?;
tracing::info!(
recipient_prefix = %fmt_hex(&rk[..4]),
domain = %remote_domain,
"federation: routing batch enqueue to remote server"
);
fed.relay_enqueue(remote_domain, rk, &payload, &channel_id)
.await
.map_err(|e| {
capnp::Error::failed(format!("federation relay failed: {e}"))
})?
}
crate::federation::routing::Destination::Local => {
match store.queue_depth(rk, &channel_id) {
Ok(depth) if depth >= MAX_QUEUE_DEPTH => {
return Err(coded_error(
E015_QUEUE_FULL,
format!("queue depth {} exceeds limit {MAX_QUEUE_DEPTH}", depth),
));
}
Err(e) => return Err(storage_err(e)),
_ => {}
}
store
.enqueue(rk, &channel_id, payload.clone())
.map_err(storage_err)?
}
};
seqs.push(seq);
metrics::record_enqueue_total();
metrics::record_enqueue_bytes(payload.len() as u64);
crate::auth::waiter(&waiters, rk).notify_waiters();
}
let seq = match self
.store
.enqueue(&rk, &channel_id, payload.clone())
.map_err(storage_err)
{
Ok(seq) => seq,
Err(e) => return Promise::err(e),
};
seqs.push(seq);
let mut list = results.get().init_seqs(seqs.len() as u32);
for (i, seq) in seqs.iter().enumerate() {
list.set(i as u32, *seq);
}
metrics::record_enqueue_total();
metrics::record_enqueue_bytes(payload.len() as u64);
tracing::info!(
recipient_count = n,
payload_len = payload.len(),
"audit: batch_enqueue"
);
crate::auth::waiter(&self.waiters, &rk).notify_waiters();
}
let mut list = results.get().init_seqs(seqs.len() as u32);
for (i, seq) in seqs.iter().enumerate() {
list.set(i as u32, *seq);
}
tracing::info!(
recipient_count = recipient_keys.len(),
payload_len = payload.len(),
"audit: batch_enqueue"
);
Promise::ok(())
Ok(())
})
}
}