- Add createChannel RPC (node.capnp @18): create 1:1 channel, returns 16-byte channelId - Store: create_channel(member_a, member_b), get_channel_members(channel_id) - FileBackedStore: channels.bin; SqlStore: migration 003_channels, schema v4 - channel_ops: handle_create_channel (auth + identity, peerKey 32 bytes) - Delivery authz: when channel_id.len() == 16, require caller and recipient are channel members (E022/E023) - Error codes E022 CHANNEL_ACCESS_DENIED, E023 CHANNEL_NOT_FOUND - SUMMARY: link Certificate lifecycle; security audit, future improvements, multi-agent plan docs - Certificate lifecycle doc, SECURITY-AUDIT, FUTURE-IMPROVEMENTS, MULTI-AGENT-WORK-PLAN - Client/core/tls/auth/server main: assorted fixes and updates from review and audit Co-authored-by: Cursor <cursoragent@cursor.com>
317 lines
10 KiB
Rust
317 lines
10 KiB
Rust
use std::sync::Arc;
|
|
use std::time::Duration;
|
|
|
|
use capnp_rpc::RpcSystem;
|
|
use dashmap::DashMap;
|
|
use opaque_ke::ServerSetup;
|
|
use quicnprotochat_core::opaque_auth::OpaqueSuite;
|
|
use quicnprotochat_proto::node_capnp::node_service;
|
|
use tokio::sync::Notify;
|
|
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
|
|
|
use crate::auth::{
|
|
current_timestamp, AuthConfig, PendingLogin, RateEntry, SessionInfo,
|
|
PENDING_LOGIN_TTL_SECS, RATE_LIMIT_WINDOW_SECS,
|
|
};
|
|
use crate::storage::Store;
|
|
|
|
/// Cap'n Proto traversal limit (words). 4 Mi words = 32 MiB; bounds DoS from deeply nested or large messages.
const CAPNP_TRAVERSAL_LIMIT_WORDS: usize = 4 * 1024 * 1024;

// Handler submodules: each defines `handle_*` methods on `NodeServiceImpl`
// that the `node_service::Server` trait impl below delegates to.
mod auth_ops;
mod channel_ops;
mod delivery;
mod key_ops;
mod p2p_ops;
|
|
|
// Cap'n Proto generated-trait glue. Every RPC method is a one-line
// delegation to the matching `handle_*` method implemented in the
// `auth_ops` / `channel_ops` / `delivery` / `key_ops` / `p2p_ops`
// submodules; no request logic lives in this impl.
impl node_service::Server for NodeServiceImpl {
    // -- Key package upload/fetch --
    fn upload_key_package(
        &mut self,
        params: node_service::UploadKeyPackageParams,
        results: node_service::UploadKeyPackageResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_upload_key_package(params, results)
    }

    fn fetch_key_package(
        &mut self,
        params: node_service::FetchKeyPackageParams,
        results: node_service::FetchKeyPackageResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_fetch_key_package(params, results)
    }

    // -- Message queue: enqueue / fetch / fetch-wait --
    fn enqueue(
        &mut self,
        params: node_service::EnqueueParams,
        results: node_service::EnqueueResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_enqueue(params, results)
    }

    fn fetch(
        &mut self,
        params: node_service::FetchParams,
        results: node_service::FetchResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_fetch(params, results)
    }

    // Blocking variant of `fetch`; presumably parks on a `Notify` from
    // `NodeServiceImpl::waiters` until a message arrives — see `delivery`.
    fn fetch_wait(
        &mut self,
        params: node_service::FetchWaitParams,
        results: node_service::FetchWaitResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_fetch_wait(params, results)
    }

    fn health(
        &mut self,
        params: node_service::HealthParams,
        results: node_service::HealthResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_health(params, results)
    }

    // -- Hybrid (post-quantum + classical) key upload/fetch --
    fn upload_hybrid_key(
        &mut self,
        params: node_service::UploadHybridKeyParams,
        results: node_service::UploadHybridKeyResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_upload_hybrid_key(params, results)
    }

    fn fetch_hybrid_key(
        &mut self,
        params: node_service::FetchHybridKeyParams,
        results: node_service::FetchHybridKeyResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_fetch_hybrid_key(params, results)
    }

    // -- OPAQUE password-authenticated registration/login --
    fn opaque_login_start(
        &mut self,
        params: node_service::OpaqueLoginStartParams,
        results: node_service::OpaqueLoginStartResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_opaque_login_start(params, results)
    }

    fn opaque_register_start(
        &mut self,
        params: node_service::OpaqueRegisterStartParams,
        results: node_service::OpaqueRegisterStartResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_opaque_register_start(params, results)
    }

    fn opaque_login_finish(
        &mut self,
        params: node_service::OpaqueLoginFinishParams,
        results: node_service::OpaqueLoginFinishResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_opaque_login_finish(params, results)
    }

    fn opaque_register_finish(
        &mut self,
        params: node_service::OpaqueRegisterFinishParams,
        results: node_service::OpaqueRegisterFinishResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_opaque_register_finish(params, results)
    }

    // -- P2P endpoint directory --
    fn publish_endpoint(
        &mut self,
        params: node_service::PublishEndpointParams,
        results: node_service::PublishEndpointResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_publish_endpoint(params, results)
    }

    fn resolve_endpoint(
        &mut self,
        params: node_service::ResolveEndpointParams,
        results: node_service::ResolveEndpointResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_resolve_endpoint(params, results)
    }

    // -- Queue inspection and acknowledgement --
    fn peek(
        &mut self,
        params: node_service::PeekParams,
        results: node_service::PeekResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_peek(params, results)
    }

    fn ack(
        &mut self,
        params: node_service::AckParams,
        results: node_service::AckResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_ack(params, results)
    }

    // -- Batch variants --
    fn fetch_hybrid_keys(
        &mut self,
        params: node_service::FetchHybridKeysParams,
        results: node_service::FetchHybridKeysResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_fetch_hybrid_keys(params, results)
    }

    fn batch_enqueue(
        &mut self,
        params: node_service::BatchEnqueueParams,
        results: node_service::BatchEnqueueResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_batch_enqueue(params, results)
    }

    // -- 1:1 channels (node.capnp @18) --
    fn create_channel(
        &mut self,
        params: node_service::CreateChannelParams,
        results: node_service::CreateChannelResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_create_channel(params, results)
    }
}
|
|
|
|
/// Current wire-protocol version of the node RPC surface.
pub const CURRENT_WIRE_VERSION: u16 = 1;

/// Per-connection RPC service instance.
///
/// Every field is an `Arc`-shared handle to process-wide state, so each
/// accepted connection gets a cheap clone of the same maps and config.
pub struct NodeServiceImpl {
    /// Persistent storage backend (messages, keys, channels).
    pub store: Arc<dyn Store>,
    /// Notifiers used to wake blocked `fetchWait` calls; keyed by
    /// recipient identifier bytes — TODO confirm key semantics in `delivery`.
    pub waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
    /// Authentication configuration shared across handlers.
    pub auth_cfg: Arc<AuthConfig>,
    /// OPAQUE server setup (long-term server-side OPAQUE material).
    pub opaque_setup: Arc<ServerSetup<OpaqueSuite>>,
    /// In-flight OPAQUE login attempts awaiting their finish step,
    /// keyed by a string identifier (pruned by the cleanup task).
    pub pending_logins: Arc<DashMap<String, PendingLogin>>,
    /// Active sessions keyed by token/key bytes (pruned by the cleanup task).
    pub sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
    /// Sliding-window rate-limit state (pruned by the cleanup task).
    pub rate_limits: Arc<DashMap<Vec<u8>, RateEntry>>,
    /// When true, enqueue does not require identity-bound session (Sealed Sender).
    pub sealed_sender: bool,
}
|
|
|
|
impl NodeServiceImpl {
    /// Constructs a service instance over the shared server state.
    ///
    /// All arguments are `Arc` handles (plus the `sealed_sender` flag), so
    /// this is cheap and is called once per accepted connection in
    /// `handle_node_connection`.
    pub fn new(
        store: Arc<dyn Store>,
        waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
        auth_cfg: Arc<AuthConfig>,
        opaque_setup: Arc<ServerSetup<OpaqueSuite>>,
        pending_logins: Arc<DashMap<String, PendingLogin>>,
        sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
        rate_limits: Arc<DashMap<Vec<u8>, RateEntry>>,
        sealed_sender: bool,
    ) -> Self {
        Self {
            store,
            waiters,
            auth_cfg,
            opaque_setup,
            pending_logins,
            sessions,
            rate_limits,
            sealed_sender,
        }
    }
}
|
|
|
|
/// Serves one inbound QUIC connection as a Cap'n Proto RPC session.
///
/// Flow: await the QUIC handshake, accept a single bidirectional stream,
/// wrap it in tokio↔futures compatibility adapters, then run a two-party
/// `RpcSystem` with a fresh `NodeServiceImpl` as the bootstrap capability.
/// The future resolves when the RPC system finishes or fails.
///
/// The Cap'n Proto reader is capped at `CAPNP_TRAVERSAL_LIMIT_WORDS`
/// (32 MiB) to bound resource use from hostile payloads.
///
/// # Errors
/// Returns an error if the handshake fails, no bidirectional stream can be
/// accepted, or the RPC system terminates with an error.
pub async fn handle_node_connection(
    connecting: quinn::Connecting,
    store: Arc<dyn Store>,
    waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
    auth_cfg: Arc<AuthConfig>,
    opaque_setup: Arc<ServerSetup<OpaqueSuite>>,
    pending_logins: Arc<DashMap<String, PendingLogin>>,
    sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
    rate_limits: Arc<DashMap<Vec<u8>, RateEntry>>,
    sealed_sender: bool,
) -> Result<(), anyhow::Error> {
    let connection = connecting.await?;

    tracing::info!(peer = %connection.remote_address(), "QUIC connected");

    // One bi-directional stream carries the whole RPC session; further
    // streams from this peer are not accepted here.
    let (send, recv) = connection
        .accept_bi()
        .await
        .map_err(|e| anyhow::anyhow!("failed to accept bi stream: {e}"))?;
    // Adapt tokio AsyncRead/AsyncWrite to the futures traits capnp-rpc expects.
    let (reader, writer) = (recv.compat(), send.compat_write());

    let mut reader_opts = capnp::message::ReaderOptions::new();
    reader_opts.traversal_limit_in_words(Some(CAPNP_TRAVERSAL_LIMIT_WORDS));
    let network = capnp_rpc::twoparty::VatNetwork::new(
        reader,
        writer,
        capnp_rpc::rpc_twoparty_capnp::Side::Server,
        reader_opts,
    );

    // Bootstrap capability handed to the client vat.
    let service: node_service::Client = capnp_rpc::new_client(NodeServiceImpl::new(
        store,
        waiters,
        auth_cfg,
        opaque_setup,
        pending_logins,
        sessions,
        rate_limits,
        sealed_sender,
    ));

    // Drive the RPC event loop until the connection ends.
    RpcSystem::new(Box::new(network), Some(service.client))
        .await
        .map_err(|e| anyhow::anyhow!("NodeService RPC error: {e}"))
}
|
|
|
|
const MESSAGE_TTL_SECS: u64 = 7 * 24 * 60 * 60; // 7 days
|
|
|
|
pub fn spawn_cleanup_task(
|
|
sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
|
|
pending_logins: Arc<DashMap<String, PendingLogin>>,
|
|
rate_limits: Arc<DashMap<Vec<u8>, RateEntry>>,
|
|
store: Arc<dyn Store>,
|
|
waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
|
|
) {
|
|
tokio::spawn(async move {
|
|
let mut interval = tokio::time::interval(Duration::from_secs(60));
|
|
loop {
|
|
interval.tick().await;
|
|
let now = current_timestamp();
|
|
|
|
sessions.retain(|_, info| info.expires_at > now);
|
|
pending_logins.retain(|_, pl| now - pl.created_at < PENDING_LOGIN_TTL_SECS);
|
|
rate_limits.retain(|_, entry| now - entry.window_start < RATE_LIMIT_WINDOW_SECS * 2);
|
|
|
|
// Bound map sizes to prevent unbounded growth from malicious clients.
|
|
const MAX_SESSIONS: usize = 100_000;
|
|
const MAX_WAITERS: usize = 100_000;
|
|
if sessions.len() > MAX_SESSIONS {
|
|
let overflow = sessions.len() - MAX_SESSIONS;
|
|
let mut entries: Vec<_> = sessions
|
|
.iter()
|
|
.map(|e| (e.key().clone(), e.expires_at))
|
|
.collect();
|
|
entries.sort_by_key(|(_, exp)| *exp);
|
|
for (key, _) in entries.into_iter().take(overflow) {
|
|
sessions.remove(&key);
|
|
}
|
|
}
|
|
if waiters.len() > MAX_WAITERS {
|
|
let overflow = waiters.len() - MAX_WAITERS;
|
|
let keys: Vec<_> =
|
|
waiters.iter().take(overflow).map(|e| e.key().clone()).collect();
|
|
for key in keys {
|
|
waiters.remove(&key);
|
|
}
|
|
}
|
|
|
|
match store.gc_expired_messages(MESSAGE_TTL_SECS) {
|
|
Ok(n) if n > 0 => {
|
|
tracing::debug!(expired = n, "garbage collected expired messages")
|
|
}
|
|
Err(e) => tracing::warn!(error = %e, "message GC failed"),
|
|
_ => {}
|
|
}
|
|
}
|
|
});
|
|
}
|