Rename project to quicnprotochat

This commit is contained in:
2026-02-21 23:37:40 +01:00
parent c9d295c510
commit 3bf3ab23e2
32 changed files with 3370 additions and 1132 deletions

View File

@@ -1,313 +0,0 @@
//! noiseml CLI client.
//!
//! # Subcommands
//!
//! | Subcommand | Description |
//! |--------------|----------------------------------------------------------|
//! | `ping` | Send a Ping to the server, print RTT |
//! | `register` | Generate a KeyPackage and upload it to the AS |
//! | `fetch-key` | Fetch a peer's KeyPackage from the AS by identity key |
//!
//! # Configuration
//!
//! | Env var | CLI flag | Default |
//! |-----------------|--------------|---------------------|
//! | `NOISEML_SERVER`| `--server` | `127.0.0.1:7000` |
//! | `RUST_LOG` | — | `warn` |
use anyhow::Context;
use capnp_rpc::{RpcSystem, rpc_twoparty_capnp::Side, twoparty};
use clap::{Parser, Subcommand};
use tokio::net::TcpStream;
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
use noiseml_core::{IdentityKeypair, NoiseKeypair, generate_key_package, handshake_initiator};
use noiseml_proto::{MsgType, ParsedEnvelope, auth_capnp::authentication_service};
// ── CLI ───────────────────────────────────────────────────────────────────────
/// Top-level CLI arguments: a parser wrapping one required subcommand.
#[derive(Debug, Parser)]
#[command(name = "noiseml", about = "noiseml CLI client", version)]
struct Args {
    /// The operation to perform (see [`Command`]).
    #[command(subcommand)]
    command: Command,
}
/// All operations the CLI can perform.
///
/// Each variant carries its own `--server` flag (same default and env var)
/// so every subcommand is independently invocable.
#[derive(Debug, Subcommand)]
enum Command {
    /// Send a Ping to the server and print the round-trip time.
    Ping {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "NOISEML_SERVER")]
        server: String,
    },
    /// Generate a fresh MLS KeyPackage and upload it to the Authentication Service.
    ///
    /// Prints the SHA-256 fingerprint of the uploaded package and the raw
    /// Ed25519 identity public key bytes (hex), which peers need to fetch it.
    Register {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "NOISEML_SERVER")]
        server: String,
    },
    /// Fetch a peer's KeyPackage from the Authentication Service.
    ///
    /// IDENTITY_KEY is the peer's Ed25519 public key encoded as 64 lowercase
    /// hex characters (32 bytes).
    FetchKey {
        /// Server address (host:port).
        #[arg(long, default_value = "127.0.0.1:7000", env = "NOISEML_SERVER")]
        server: String,
        /// Target peer's Ed25519 identity public key (64 hex chars = 32 bytes).
        identity_key: String,
    },
}
// ── Entry point ───────────────────────────────────────────────────────────────
#[tokio::main]
async fn main() -> anyhow::Result<()> {
tracing_subscriber::fmt()
.with_env_filter(
tracing_subscriber::EnvFilter::try_from_default_env()
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("warn")),
)
.init();
let args = Args::parse();
match args.command {
Command::Ping { server } => cmd_ping(&server).await,
Command::Register { server } => {
let local = tokio::task::LocalSet::new();
local.run_until(cmd_register(&server)).await
}
Command::FetchKey {
server,
identity_key,
} => {
let local = tokio::task::LocalSet::new();
local.run_until(cmd_fetch_key(&server, &identity_key)).await
}
}
}
// ── Subcommand implementations ────────────────────────────────────────────────
/// Connect to `server`, complete Noise_XX, send a Ping, and print RTT.
async fn cmd_ping(server: &str) -> anyhow::Result<()> {
    let keypair = NoiseKeypair::generate();

    let stream = TcpStream::connect(server)
        .await
        .with_context(|| format!("could not connect to {server}"))?;
    tracing::debug!(server = %server, "TCP connection established");

    let mut transport = handshake_initiator(stream, &keypair)
        .await
        .context("Noise_XX handshake failed")?;

    // Log a short prefix of the server's static key for debugging.
    let remote = match transport.remote_static_public_key() {
        Some(key) => fmt_hex(&key[..4]),
        None => "unknown".into(),
    };
    tracing::debug!(server_key = %remote, "handshake complete");

    // RTT is measured from just before the Ping leaves.
    let sent_at = current_timestamp_ms();
    let ping = ParsedEnvelope {
        msg_type: MsgType::Ping,
        group_id: vec![],
        sender_id: vec![],
        payload: vec![],
        timestamp_ms: sent_at,
    };
    transport
        .send_envelope(&ping)
        .await
        .context("failed to send Ping")?;
    tracing::debug!("Ping sent");

    let response = transport
        .recv_envelope()
        .await
        .context("failed to receive Pong")?;
    if let MsgType::Pong = response.msg_type {
        let rtt_ms = current_timestamp_ms().saturating_sub(sent_at);
        println!("Pong from {server} rtt={rtt_ms}ms");
        Ok(())
    } else {
        anyhow::bail!(
            "protocol error: expected Pong from {server}, got unexpected message type"
        )
    }
}
/// Generate a KeyPackage for a fresh identity and upload it to the AS.
///
/// Must run on a `LocalSet` because capnp-rpc is `!Send`.
async fn cmd_register(server: &str) -> anyhow::Result<()> {
    let noise_keypair = NoiseKeypair::generate();
    let identity = IdentityKeypair::generate();
    let (tls_bytes, fingerprint) =
        generate_key_package(&identity).context("KeyPackage generation failed")?;

    let as_client = connect_as(server, &noise_keypair).await?;

    // Build the upload request; the builder borrow ends before `send`.
    let mut req = as_client.upload_key_package_request();
    {
        let mut builder = req.get();
        builder.set_identity_key(&identity.public_key_bytes());
        builder.set_package(&tls_bytes);
    }
    let response = req
        .send()
        .promise
        .await
        .context("upload_key_package RPC failed")?;
    let server_fp = response
        .get()
        .context("upload_key_package: bad response")?
        .get_fingerprint()
        .context("upload_key_package: missing fingerprint")?
        .to_vec();

    // The server must echo the fingerprint we computed locally.
    anyhow::ensure!(
        server_fp == fingerprint,
        "fingerprint mismatch: local={} server={}",
        hex::encode(&fingerprint),
        hex::encode(&server_fp),
    );

    println!("identity_key : {}", hex::encode(identity.public_key_bytes()));
    println!("fingerprint : {}", hex::encode(&fingerprint));
    println!("KeyPackage uploaded successfully.");
    Ok(())
}
/// Fetch a peer's KeyPackage from the AS by their hex-encoded identity key.
///
/// Must run on a `LocalSet` because capnp-rpc is `!Send`.
async fn cmd_fetch_key(server: &str, identity_key_hex: &str) -> anyhow::Result<()> {
    // Decode and validate the peer key before dialling the server.
    let identity_key = hex::decode(identity_key_hex)
        .map_err(|e| anyhow::anyhow!(e))
        .context("identity_key must be 64 hex characters (32 bytes)")?;
    anyhow::ensure!(
        identity_key.len() == 32,
        "identity_key must be exactly 32 bytes, got {}",
        identity_key.len()
    );

    let noise_keypair = NoiseKeypair::generate();
    let as_client = connect_as(server, &noise_keypair).await?;

    let mut req = as_client.fetch_key_package_request();
    req.get().set_identity_key(&identity_key);
    let reply = req
        .send()
        .promise
        .await
        .context("fetch_key_package RPC failed")?;
    let package = reply
        .get()
        .context("fetch_key_package: bad response")?
        .get_package()
        .context("fetch_key_package: missing package field")?
        .to_vec();

    // Empty Data is the schema's "no package available" sentinel.
    if package.is_empty() {
        println!("No KeyPackage available for this identity.");
        return Ok(());
    }

    use sha2::{Digest, Sha256};
    let fingerprint = Sha256::digest(&package);
    println!("fingerprint : {}", hex::encode(fingerprint));
    println!("package_len : {} bytes", package.len());
    println!("KeyPackage fetched successfully.");
    Ok(())
}
// ── Shared helpers ────────────────────────────────────────────────────────────
/// Establish a Noise_XX connection and return an `AuthenticationService` client.
///
/// Must be called from within a `LocalSet` because capnp-rpc is `!Send`.
async fn connect_as(
    server: &str,
    noise_keypair: &NoiseKeypair,
) -> anyhow::Result<authentication_service::Client> {
    let stream = TcpStream::connect(server)
        .await
        .with_context(|| format!("could not connect to {server}"))?;
    let (reader, writer) = handshake_initiator(stream, noise_keypair)
        .await
        .context("Noise_XX handshake failed")?
        .into_capnp_io();
    let network = Box::new(twoparty::VatNetwork::new(
        reader.compat(),
        writer.compat_write(),
        Side::Client,
        Default::default(),
    ));
    let mut rpc_system = RpcSystem::new(network, None);
    let as_client: authentication_service::Client = rpc_system.bootstrap(Side::Server);
    // The RPC system must be polled for the client to make progress; detach
    // it onto the surrounding LocalSet.
    tokio::task::spawn_local(rpc_system);
    Ok(as_client)
}
/// Format `bytes` as a lowercase hex string (two digits per byte).
///
/// Callers pass an already-truncated prefix (e.g. `&key[..4]`) when they
/// only want a short identifier for logs. (The old doc comment claimed a
/// trailing `…` was appended; the code never did that.)
fn fmt_hex(bytes: &[u8]) -> String {
    // Collect directly — the previous `format!("{hex}")` was a no-op rewrap.
    bytes.iter().map(|b| format!("{b:02x}")).collect()
}
/// Return the current Unix timestamp in milliseconds.
///
/// Yields 0 if the system clock reports a time before the Unix epoch.
fn current_timestamp_ms() -> u64 {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|elapsed| elapsed.as_millis() as u64)
        .unwrap_or(0)
}
// ── Hex encoding helper ───────────────────────────────────────────────────────
//
// We use a tiny inline module rather than adding `hex` as a dependency.
mod hex {
    /// Encode `bytes` as a lowercase hex string (two digits per byte).
    pub fn encode(bytes: impl AsRef<[u8]>) -> String {
        bytes.as_ref().iter().map(|b| format!("{b:02x}")).collect()
    }

    /// Decode a hex string (upper- or lowercase digits) into bytes.
    ///
    /// Returns an error for odd-length input or any non-hex character.
    pub fn decode(s: &str) -> Result<Vec<u8>, &'static str> {
        // Work on raw bytes: the previous `&s[i..i + 2]` byte-slicing
        // panicked when a multi-byte UTF-8 character straddled the slice
        // boundary (e.g. decode("€0")); non-ASCII input must instead be
        // rejected as invalid hex.
        let bytes = s.as_bytes();
        if bytes.len() % 2 != 0 {
            return Err("odd-length hex string");
        }
        bytes
            .chunks_exact(2)
            .map(|pair| Ok(hex_val(pair[0])? << 4 | hex_val(pair[1])?))
            .collect()
    }

    /// Map one ASCII hex digit to its numeric value.
    fn hex_val(b: u8) -> Result<u8, &'static str> {
        match b {
            b'0'..=b'9' => Ok(b - b'0'),
            b'a'..=b'f' => Ok(b - b'a' + 10),
            b'A'..=b'F' => Ok(b - b'A' + 10),
            _ => Err("invalid hex character"),
        }
    }
}

View File

@@ -1,460 +0,0 @@
//! noiseml-server — Delivery Service + Authentication Service binary.
//!
//! # M3 scope
//!
//! The server exposes two Noise_XX-protected Cap'n Proto RPC endpoints:
//!
//! * **AS** (`--listen`, default `0.0.0.0:7000`) — `AuthenticationService`:
//! upload and fetch single-use MLS KeyPackages.
//! * **DS** (`--ds-listen`, default `0.0.0.0:7001`) — `DeliveryService`:
//! enqueue and fetch opaque payloads (Welcome messages, Commits, Application
//! messages) keyed by recipient Ed25519 public key.
//!
//! # Architecture
//!
//! ```text
//! TcpListener (AS, 7000) TcpListener (DS, 7001)
//! └─ Noise_XX handshake └─ Noise_XX handshake
//! └─ capnp-rpc VatNetwork (LocalSet, !Send)
//! ├─ AuthServiceImpl (shares KeyPackageStore via Arc)
//! └─ DeliveryServiceImpl (shares DeliveryStore via Arc)
//! ```
//!
//! Because `capnp-rpc` uses `Rc<RefCell<>>` internally it is `!Send`.
//! The entire RPC stack lives on a `tokio::task::LocalSet` spawned per
//! connection.
//!
//! # Configuration
//!
//! | Env var | CLI flag | Default |
//! |---------------------|----------------|-----------------|
//! | `NOISEML_LISTEN` | `--listen` | `0.0.0.0:7000` |
//! | `NOISEML_DS_LISTEN` | `--ds-listen` | `0.0.0.0:7001` |
//! | `RUST_LOG` | — | `info` |
use std::{collections::VecDeque, sync::Arc};
use anyhow::Context;
use capnp::capability::Promise;
use capnp_rpc::{RpcSystem, rpc_twoparty_capnp::Side, twoparty};
use clap::Parser;
use dashmap::DashMap;
use noiseml_core::{NoiseKeypair, handshake_responder};
use noiseml_proto::{
auth_capnp::authentication_service,
delivery_capnp::delivery_service,
};
use sha2::{Digest, Sha256};
use tokio::net::{TcpListener, TcpStream};
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
use tracing::Instrument;
// ── CLI ───────────────────────────────────────────────────────────────────────
/// CLI arguments for the combined AS + DS server binary.
#[derive(Debug, Parser)]
#[command(
    name = "noiseml-server",
    about = "noiseml Delivery Service + Authentication Service",
    version
)]
struct Args {
    /// TCP address for the Authentication Service.
    #[arg(long, default_value = "0.0.0.0:7000", env = "NOISEML_LISTEN")]
    listen: String,
    /// TCP address for the Delivery Service.
    #[arg(long, default_value = "0.0.0.0:7001", env = "NOISEML_DS_LISTEN")]
    ds_listen: String,
}
// ── Shared store types ────────────────────────────────────────────────────────
/// Thread-safe map from Ed25519 identity public key bytes (32 B) to a queue
/// of serialised MLS KeyPackage blobs.
///
/// Each KeyPackage is single-use per RFC 9420: `fetch_key_package` removes
/// and returns exactly one entry.
type KeyPackageStore = Arc<DashMap<Vec<u8>, VecDeque<Vec<u8>>>>;
/// Thread-safe message queue for the Delivery Service.
///
/// Maps recipient Ed25519 public key (32 bytes) to a FIFO queue of opaque
/// payload bytes (TLS-encoded MLS messages or other framed data).
///
/// NOTE(review): both aliases have the same shape; they are kept distinct
/// so AS and DS storage cannot be mixed up at call sites.
type DeliveryStore = Arc<DashMap<Vec<u8>, VecDeque<Vec<u8>>>>;
// ── Authentication Service implementation ─────────────────────────────────────
/// Cap'n Proto RPC server implementation for `AuthenticationService`.
struct AuthServiceImpl {
    // Shared with every other AS connection via `Arc` (see `KeyPackageStore`).
    store: KeyPackageStore,
}
impl authentication_service::Server for AuthServiceImpl {
    /// Upload a single-use KeyPackage and return its SHA-256 fingerprint.
    ///
    /// Validates that `identityKey` is exactly 32 bytes and that `package`
    /// is non-empty, then appends the package to the identity's FIFO queue.
    fn upload_key_package(
        &mut self,
        params: authentication_service::UploadKeyPackageParams,
        mut results: authentication_service::UploadKeyPackageResults,
    ) -> Promise<(), capnp::Error> {
        let params = params.get().map_err(|e| {
            capnp::Error::failed(format!("upload_key_package: bad params: {e}"))
        });
        // Copy both fields out of the capnp reader before touching the store,
        // so the reader borrow ends early.
        let (identity_key, package) = match params {
            Ok(p) => {
                let ik = match p.get_identity_key() {
                    Ok(v) => v.to_vec(),
                    Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
                };
                let pkg = match p.get_package() {
                    Ok(v) => v.to_vec(),
                    Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
                };
                (ik, pkg)
            }
            Err(e) => return Promise::err(e),
        };
        if identity_key.len() != 32 {
            return Promise::err(capnp::Error::failed(format!(
                "identityKey must be exactly 32 bytes, got {}",
                identity_key.len()
            )));
        }
        if package.is_empty() {
            return Promise::err(capnp::Error::failed(
                "package must not be empty".to_string(),
            ));
        }
        // Fingerprint = SHA-256 over the raw serialised KeyPackage bytes.
        let fingerprint: Vec<u8> = Sha256::digest(&package).to_vec();
        self.store
            .entry(identity_key)
            .or_default()
            .push_back(package);
        results
            .get()
            .set_fingerprint(&fingerprint);
        tracing::debug!(
            fingerprint = %fmt_hex(&fingerprint[..4]),
            "KeyPackage uploaded"
        );
        Promise::ok(())
    }
    /// Atomically remove and return one KeyPackage for the given identity key.
    ///
    /// An empty `package` in the response means "none available" (schema
    /// sentinel); the caller distinguishes it from a real package by length.
    fn fetch_key_package(
        &mut self,
        params: authentication_service::FetchKeyPackageParams,
        mut results: authentication_service::FetchKeyPackageResults,
    ) -> Promise<(), capnp::Error> {
        let identity_key = match params.get() {
            Ok(p) => match p.get_identity_key() {
                Ok(v) => v.to_vec(),
                Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
            },
            Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
        };
        if identity_key.len() != 32 {
            return Promise::err(capnp::Error::failed(format!(
                "identityKey must be exactly 32 bytes, got {}",
                identity_key.len()
            )));
        }
        // Atomically pop one package from the front of the queue.
        let package = self
            .store
            .get_mut(&identity_key)
            .and_then(|mut q| q.pop_front());
        match package {
            Some(pkg) => {
                tracing::debug!(
                    identity = %fmt_hex(&identity_key[..4]),
                    "KeyPackage fetched"
                );
                results.get().set_package(&pkg);
            }
            None => {
                tracing::debug!(
                    identity = %fmt_hex(&identity_key[..4]),
                    "no KeyPackage available for identity"
                );
                // Return empty Data — schema specifies this as the "no package" sentinel.
                results.get().set_package(&[]);
            }
        }
        Promise::ok(())
    }
}
// ── Delivery Service implementation ───────────────────────────────────────────
/// Cap'n Proto RPC server implementation for `DeliveryService`.
///
/// Provides a simple store-and-forward relay for MLS messages:
/// * `enqueue` appends an opaque payload to the recipient's FIFO queue.
/// * `fetch` atomically drains and returns the entire queue.
struct DeliveryServiceImpl {
    // Shared with every other DS connection via `Arc` (see `DeliveryStore`).
    store: DeliveryStore,
}
impl delivery_service::Server for DeliveryServiceImpl {
    /// Append `payload` to the queue for `recipient_key`.
    ///
    /// Validates the recipient key length (32 bytes) and rejects empty
    /// payloads; the payload bytes are otherwise treated as opaque.
    fn enqueue(
        &mut self,
        params: delivery_service::EnqueueParams,
        _results: delivery_service::EnqueueResults,
    ) -> Promise<(), capnp::Error> {
        let p = match params.get() {
            Ok(p) => p,
            Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
        };
        // Copy both fields out of the capnp reader before touching the store.
        let recipient_key = match p.get_recipient_key() {
            Ok(v) => v.to_vec(),
            Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
        };
        let payload = match p.get_payload() {
            Ok(v) => v.to_vec(),
            Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
        };
        if recipient_key.len() != 32 {
            return Promise::err(capnp::Error::failed(format!(
                "recipientKey must be exactly 32 bytes, got {}",
                recipient_key.len()
            )));
        }
        if payload.is_empty() {
            return Promise::err(capnp::Error::failed(
                "payload must not be empty".to_string(),
            ));
        }
        // Key is cloned because it is still needed for the log line after
        // `entry` consumes it.
        self.store
            .entry(recipient_key.clone())
            .or_default()
            .push_back(payload);
        tracing::debug!(
            recipient = %fmt_hex(&recipient_key[..4]),
            "message enqueued"
        );
        Promise::ok(())
    }
    /// Atomically drain and return all queued payloads for `recipient_key`.
    ///
    /// Fetching is destructive: drained messages are gone from the store.
    /// An unknown recipient simply yields an empty list.
    fn fetch(
        &mut self,
        params: delivery_service::FetchParams,
        mut results: delivery_service::FetchResults,
    ) -> Promise<(), capnp::Error> {
        let recipient_key = match params.get() {
            Ok(p) => match p.get_recipient_key() {
                Ok(v) => v.to_vec(),
                Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
            },
            Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
        };
        if recipient_key.len() != 32 {
            return Promise::err(capnp::Error::failed(format!(
                "recipientKey must be exactly 32 bytes, got {}",
                recipient_key.len()
            )));
        }
        // Atomically drain the entire queue.
        let messages: Vec<Vec<u8>> = self
            .store
            .get_mut(&recipient_key)
            .map(|mut q| q.drain(..).collect())
            .unwrap_or_default();
        tracing::debug!(
            recipient = %fmt_hex(&recipient_key[..4]),
            count = messages.len(),
            "messages fetched"
        );
        // capnp lists are sized up front, then filled element by element.
        let mut list = results.get().init_payloads(messages.len() as u32);
        for (i, msg) in messages.iter().enumerate() {
            list.set(i as u32, msg);
        }
        Promise::ok(())
    }
}
// ── Entry point ───────────────────────────────────────────────────────────────
/// Server entry point: bind both listeners and run the twin accept loops
/// on a single `LocalSet` (capnp-rpc is `!Send`).
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Honour RUST_LOG when set; otherwise default to `info`.
    tracing_subscriber::fmt()
        .with_env_filter(
            tracing_subscriber::EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")),
        )
        .init();
    let args = Args::parse();
    // Generate a fresh static Noise keypair for this server instance.
    // M6 replaces this with persistent key loading from SQLite.
    let keypair = Arc::new(NoiseKeypair::generate());
    {
        let pub_bytes = keypair.public_bytes();
        tracing::info!(
            listen = %args.listen,
            ds_listen = %args.ds_listen,
            public_key = %fmt_hex(&pub_bytes[..4]),
            "noiseml-server starting (M3) — keypair is ephemeral"
        );
    }
    // Shared stores — all connections share the same in-memory maps.
    let kp_store: KeyPackageStore = Arc::new(DashMap::new());
    let ds_store: DeliveryStore = Arc::new(DashMap::new());
    let as_listener = TcpListener::bind(&args.listen)
        .await
        .with_context(|| format!("failed to bind AS to {}", args.listen))?;
    let ds_listener = TcpListener::bind(&args.ds_listen)
        .await
        .with_context(|| format!("failed to bind DS to {}", args.ds_listen))?;
    tracing::info!(
        as_addr = %args.listen,
        ds_addr = %args.ds_listen,
        "accepting connections"
    );
    // capnp-rpc is !Send (Rc internals), so all RPC tasks must stay on a
    // LocalSet. Both accept loops share one LocalSet.
    let local = tokio::task::LocalSet::new();
    local
        .run_until(async move {
            // Accept on both listeners concurrently; every connection gets its
            // own local task so a slow client cannot stall the accept loop.
            loop {
                tokio::select! {
                    result = as_listener.accept() => {
                        let (stream, peer_addr) = result.context("AS accept failed")?;
                        let keypair = Arc::clone(&keypair);
                        let store = Arc::clone(&kp_store);
                        tokio::task::spawn_local(
                            async move {
                                match handle_as_connection(stream, keypair, store).await {
                                    Ok(()) => tracing::debug!("AS connection closed"),
                                    Err(e) => tracing::warn!(error = %e, "AS connection error"),
                                }
                            }
                            .instrument(tracing::info_span!("as_conn", peer = %peer_addr)),
                        );
                    }
                    result = ds_listener.accept() => {
                        let (stream, peer_addr) = result.context("DS accept failed")?;
                        let keypair = Arc::clone(&keypair);
                        let store = Arc::clone(&ds_store);
                        tokio::task::spawn_local(
                            async move {
                                match handle_ds_connection(stream, keypair, store).await {
                                    Ok(()) => tracing::debug!("DS connection closed"),
                                    Err(e) => tracing::warn!(error = %e, "DS connection error"),
                                }
                            }
                            .instrument(tracing::info_span!("ds_conn", peer = %peer_addr)),
                        );
                    }
                }
            }
            // The loop only exits via `?` on accept errors; this unreachable
            // Ok pins the async block's return type.
            #[allow(unreachable_code)]
            Ok::<(), anyhow::Error>(())
        })
        .await
}
// ── Per-connection handlers ───────────────────────────────────────────────────
/// Handle one Authentication Service connection: Noise handshake, then
/// serve capnp-rpc until the peer disconnects or the RPC system errors.
async fn handle_as_connection(
    stream: TcpStream,
    keypair: Arc<NoiseKeypair>,
    store: KeyPackageStore,
) -> Result<(), anyhow::Error> {
    let (reader, writer) = noise_handshake(stream, &keypair, "AS")
        .await?
        .into_capnp_io();
    let network = Box::new(twoparty::VatNetwork::new(
        reader.compat(),
        writer.compat_write(),
        Side::Server,
        Default::default(),
    ));
    let service: authentication_service::Client =
        capnp_rpc::new_client(AuthServiceImpl { store });
    let rpc_system = RpcSystem::new(network, Some(service.client));
    rpc_system
        .await
        .map_err(|e| anyhow::anyhow!("AS RPC error: {e}"))
}
/// Handle one Delivery Service connection: Noise handshake, then serve
/// capnp-rpc until the peer disconnects or the RPC system errors.
async fn handle_ds_connection(
    stream: TcpStream,
    keypair: Arc<NoiseKeypair>,
    store: DeliveryStore,
) -> Result<(), anyhow::Error> {
    let (reader, writer) = noise_handshake(stream, &keypair, "DS")
        .await?
        .into_capnp_io();
    let network = Box::new(twoparty::VatNetwork::new(
        reader.compat(),
        writer.compat_write(),
        Side::Server,
        Default::default(),
    ));
    let service: delivery_service::Client =
        capnp_rpc::new_client(DeliveryServiceImpl { store });
    let rpc_system = RpcSystem::new(network, Some(service.client));
    rpc_system
        .await
        .map_err(|e| anyhow::anyhow!("DS RPC error: {e}"))
}
/// Perform the Noise_XX responder handshake and log the remote static key.
///
/// `label` ("AS"/"DS") tags both the error message and the log line.
async fn noise_handshake(
    stream: TcpStream,
    keypair: &NoiseKeypair,
    label: &str,
) -> anyhow::Result<noiseml_core::NoiseTransport> {
    let transport = handshake_responder(stream, keypair)
        .await
        .map_err(|e| anyhow::anyhow!("{label} Noise handshake failed: {e}"))?;
    // Log a short prefix of the client's static key for correlation.
    let remote = if let Some(key) = transport.remote_static_public_key() {
        fmt_hex(&key[..4])
    } else {
        String::from("unknown")
    };
    tracing::info!(remote_key = %remote, "{label} Noise_XX handshake complete");
    Ok(transport)
}
// ── Helpers ───────────────────────────────────────────────────────────────────
/// Format `bytes` as a lowercase hex string (two digits per byte).
///
/// Callers pass an already-truncated prefix (e.g. `&key[..4]`) when they
/// only want a short identifier for logs. (The old doc comment claimed a
/// trailing `…` was appended; the code never did that.)
fn fmt_hex(bytes: &[u8]) -> String {
    // Collect directly — the previous `format!("{hex}")` was a no-op rewrap.
    bytes.iter().map(|b| format!("{b:02x}")).collect()
}

View File

@@ -1,17 +1,18 @@
[package]
name = "noiseml-client"
name = "quicnprotochat-client"
version = "0.1.0"
edition = "2021"
description = "CLI client for noiseml."
description = "CLI client for quicnprotochat."
license = "MIT"
[[bin]]
name = "noiseml"
name = "quicnprotochat"
path = "src/main.rs"
[dependencies]
noiseml-core = { path = "../noiseml-core" }
noiseml-proto = { path = "../noiseml-proto" }
quicnprotochat-core = { path = "../quicnprotochat-core" }
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
openmls_rust_crypto = { workspace = true }
# Serialisation + RPC
capnp = { workspace = true }
@@ -21,6 +22,9 @@ capnp-rpc = { workspace = true }
tokio = { workspace = true }
tokio-util = { workspace = true }
futures = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
bincode = { workspace = true }
# Error handling
anyhow = { workspace = true }
@@ -28,6 +32,9 @@ thiserror = { workspace = true }
# Crypto — for fingerprint verification in fetch-key subcommand
sha2 = { workspace = true }
quinn = { workspace = true }
quinn-proto = { workspace = true }
rustls = { workspace = true }
# Logging
tracing = { workspace = true }
@@ -37,5 +44,5 @@ tracing-subscriber = { workspace = true }
clap = { workspace = true }
[dev-dependencies]
# Integration tests use noiseml-core, noiseml-proto, and capnp-rpc directly.
# Integration tests use quicnprotochat-core, quicnprotochat-proto, and capnp-rpc directly.
dashmap = { workspace = true }

File diff suppressed because it is too large Load Diff

View File

@@ -6,13 +6,12 @@
use std::{collections::VecDeque, sync::Arc};
use capnp::capability::Promise;
use capnp_rpc::{RpcSystem, rpc_twoparty_capnp::Side, twoparty};
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
use dashmap::DashMap;
use noiseml_core::{
IdentityKeypair, NoiseKeypair, generate_key_package, handshake_initiator,
handshake_responder,
use quicnprotochat_core::{
generate_key_package, handshake_initiator, handshake_responder, IdentityKeypair, NoiseKeypair,
};
use noiseml_proto::auth_capnp::authentication_service;
use quicnprotochat_proto::auth_capnp::authentication_service;
use sha2::{Digest, Sha256};
use tokio::net::{TcpListener, TcpStream};
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
@@ -111,8 +110,7 @@ async fn serve_one(stream: TcpStream, keypair: Arc<NoiseKeypair>, store: Store)
Side::Server,
Default::default(),
);
let svc: authentication_service::Client =
capnp_rpc::new_client(TestAuthService { store });
let svc: authentication_service::Client = capnp_rpc::new_client(TestAuthService { store });
let rpc = RpcSystem::new(Box::new(network), Some(svc.client));
tokio::task::spawn_local(rpc).await.ok();
}
@@ -156,7 +154,8 @@ async fn upload_then_fetch_fingerprints_match() {
let alice = connect_client(addr).await;
let mut req = alice.upload_key_package_request();
req.get().set_identity_key(&alice_identity.public_key_bytes());
req.get()
.set_identity_key(&alice_identity.public_key_bytes());
req.get().set_package(&tls_bytes);
let resp = req.send().promise.await.unwrap();
let server_fp = resp.get().unwrap().get_fingerprint().unwrap().to_vec();
@@ -166,15 +165,22 @@ async fn upload_then_fetch_fingerprints_match() {
// Bob: fetch Alice's package by her identity key.
let bob = connect_client(addr).await;
let mut req2 = bob.fetch_key_package_request();
req2.get().set_identity_key(&alice_identity.public_key_bytes());
req2.get()
.set_identity_key(&alice_identity.public_key_bytes());
let resp2 = req2.send().promise.await.unwrap();
let fetched = resp2.get().unwrap().get_package().unwrap().to_vec();
assert!(!fetched.is_empty(), "fetched package must not be empty");
assert_eq!(fetched, tls_bytes, "fetched bytes must match uploaded bytes");
assert_eq!(
fetched, tls_bytes,
"fetched bytes must match uploaded bytes"
);
let fetched_fp: Vec<u8> = Sha256::digest(&fetched).to_vec();
assert_eq!(fetched_fp, local_fp, "fetched fingerprint must match uploaded");
assert_eq!(
fetched_fp, local_fp,
"fetched fingerprint must match uploaded"
);
})
.await;
}
@@ -234,7 +240,11 @@ async fn packages_consumed_in_fifo_order() {
.get_package()
.unwrap()
.to_vec();
assert_eq!(pkg1, vec![1u8, 2, 3], "first fetch must return first package");
assert_eq!(
pkg1,
vec![1u8, 2, 3],
"first fetch must return first package"
);
let client2 = connect_client(addr).await;
let mut req2 = client2.fetch_key_package_request();
@@ -249,7 +259,11 @@ async fn packages_consumed_in_fifo_order() {
.get_package()
.unwrap()
.to_vec();
assert_eq!(pkg2, vec![4u8, 5, 6], "second fetch must return second package");
assert_eq!(
pkg2,
vec![4u8, 5, 6],
"second fetch must return second package"
);
})
.await;
}

View File

@@ -0,0 +1,433 @@
//! Integration test: full MLS group flow via Authentication Service + Delivery Service.
//!
//! Steps:
//! - Start in-process AS and DS (Noise_XX + capnp-rpc) on a LocalSet.
//! - Alice and Bob generate KeyPackages and upload to AS.
//! - Alice fetches Bob's KeyPackage, creates a group, and invites Bob.
//! - Welcome + application messages traverse the Delivery Service.
//! - Both sides decrypt and confirm plaintext payloads.
use std::{collections::VecDeque, sync::Arc, time::Duration};
use anyhow::Context;
use capnp::capability::Promise;
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
use dashmap::DashMap;
use quicnprotochat_core::{
handshake_initiator, handshake_responder, GroupMember, IdentityKeypair, NoiseKeypair,
};
use quicnprotochat_proto::{auth_capnp::authentication_service, delivery_capnp::delivery_service};
use sha2::{Digest, Sha256};
use tokio::net::{TcpListener, TcpStream};
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
/// Identity key (32 B) → FIFO queue of serialised KeyPackages (AS side).
type KeyPackageStore = Arc<DashMap<Vec<u8>, VecDeque<Vec<u8>>>>;
/// Recipient key (32 B) → FIFO queue of opaque payload bytes (DS side).
type DeliveryStore = Arc<DashMap<Vec<u8>, VecDeque<Vec<u8>>>>;
/// Full Alice↔Bob MLS round-trip against live AS + DS.
#[tokio::test]
async fn mls_group_end_to_end_round_trip() -> anyhow::Result<()> {
    // capnp-rpc is !Send, so the whole test runs on one LocalSet.
    let local = tokio::task::LocalSet::new();
    local
        .run_until(async move {
            let server_keypair = Arc::new(NoiseKeypair::generate());
            let kp_store: KeyPackageStore = Arc::new(DashMap::new());
            let ds_store: DeliveryStore = Arc::new(DashMap::new());
            // Each in-process server accepts exactly 2 connections
            // (one for Alice, one for Bob).
            let as_addr =
                spawn_as_server(2, Arc::clone(&server_keypair), Arc::clone(&kp_store)).await;
            let ds_addr =
                spawn_ds_server(2, Arc::clone(&server_keypair), Arc::clone(&ds_store)).await;
            tokio::time::sleep(Duration::from_millis(10)).await;
            let alice_id = Arc::new(IdentityKeypair::generate());
            let bob_id = Arc::new(IdentityKeypair::generate());
            let mut alice = GroupMember::new(Arc::clone(&alice_id));
            let mut bob = GroupMember::new(Arc::clone(&bob_id));
            let alice_kp = alice.generate_key_package()?;
            let bob_kp = bob.generate_key_package()?;
            // Both clients register their KeyPackages with the AS.
            let alice_as = connect_as(as_addr, &NoiseKeypair::generate()).await?;
            let bob_as = connect_as(as_addr, &NoiseKeypair::generate()).await?;
            upload_key_package(&alice_as, &alice_id.public_key_bytes(), &alice_kp).await?;
            upload_key_package(&bob_as, &bob_id.public_key_bytes(), &bob_kp).await?;
            // Alice fetches Bob's KeyPackage so she can add him to the group.
            let fetched_bob_kp = fetch_key_package(&alice_as, &bob_id.public_key_bytes()).await?;
            anyhow::ensure!(
                !fetched_bob_kp.is_empty(),
                "AS must return Bob's KeyPackage"
            );
            alice.create_group(b"m3-integration")?;
            let (_commit, welcome) = alice.add_member(&fetched_bob_kp)?;
            let alice_ds = connect_ds(ds_addr, &NoiseKeypair::generate()).await?;
            let bob_ds = connect_ds(ds_addr, &NoiseKeypair::generate()).await?;
            // The Welcome travels through the DS, keyed by Bob's identity.
            enqueue(&alice_ds, &bob_id.public_key_bytes(), &welcome).await?;
            let welcome_payloads = fetch_all(&bob_ds, &bob_id.public_key_bytes()).await?;
            let welcome_bytes = welcome_payloads
                .first()
                .cloned()
                .context("welcome must be present")?;
            bob.join_group(&welcome_bytes)?;
            // Alice → Bob application message through the DS.
            let ct_ab = alice.send_message(b"hello bob")?;
            enqueue(&alice_ds, &bob_id.public_key_bytes(), &ct_ab).await?;
            let bob_msgs = fetch_all(&bob_ds, &bob_id.public_key_bytes()).await?;
            let ab_plaintext = bob
                .receive_message(bob_msgs.first().context("missing alice→bob payload")?)?
                .context("alice→bob must be application message")?;
            assert_eq!(ab_plaintext, b"hello bob");
            // Bob → Alice application message through the DS.
            let ct_ba = bob.send_message(b"hello alice")?;
            enqueue(&bob_ds, &alice_id.public_key_bytes(), &ct_ba).await?;
            let alice_msgs = fetch_all(&alice_ds, &alice_id.public_key_bytes()).await?;
            let ba_plaintext = alice
                .receive_message(alice_msgs.first().context("missing bob→alice payload")?)?
                .context("bob→alice must be application message")?;
            assert_eq!(ba_plaintext, b"hello alice");
            Ok(())
        })
        .await
}
// ── Test helpers ────────────────────────────────────────────────────────────
/// Bind an ephemeral AS listener and serve exactly `n_connections` clients
/// on the current LocalSet; returns the bound address.
async fn spawn_as_server(
    n_connections: usize,
    keypair: Arc<NoiseKeypair>,
    store: KeyPackageStore,
) -> std::net::SocketAddr {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let addr = listener.local_addr().unwrap();
    tokio::task::spawn_local(async move {
        for _ in 0..n_connections {
            let (stream, _) = listener.accept().await.unwrap();
            tokio::task::spawn_local(serve_as_connection(
                stream,
                Arc::clone(&keypair),
                Arc::clone(&store),
            ));
        }
    });
    addr
}
/// Serve one AS connection: responder handshake, then capnp-rpc until EOF.
async fn serve_as_connection(
    stream: TcpStream,
    keypair: Arc<NoiseKeypair>,
    store: KeyPackageStore,
) {
    let (reader, writer) = handshake_responder(stream, &keypair)
        .await
        .unwrap()
        .into_capnp_io();
    let network = Box::new(twoparty::VatNetwork::new(
        reader.compat(),
        writer.compat_write(),
        Side::Server,
        Default::default(),
    ));
    let service: authentication_service::Client = capnp_rpc::new_client(AuthService { store });
    // A shutdown error just means the peer hung up — irrelevant to the test.
    let _ = RpcSystem::new(network, Some(service.client)).await;
}
/// Bind an ephemeral DS listener and serve exactly `n_connections` clients
/// on the current LocalSet; returns the bound address.
async fn spawn_ds_server(
    n_connections: usize,
    keypair: Arc<NoiseKeypair>,
    store: DeliveryStore,
) -> std::net::SocketAddr {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let addr = listener.local_addr().unwrap();
    tokio::task::spawn_local(async move {
        for _ in 0..n_connections {
            let (stream, _) = listener.accept().await.unwrap();
            tokio::task::spawn_local(serve_ds_connection(
                stream,
                Arc::clone(&keypair),
                Arc::clone(&store),
            ));
        }
    });
    addr
}
/// Serve one DS connection: responder handshake, then capnp-rpc until EOF.
async fn serve_ds_connection(stream: TcpStream, keypair: Arc<NoiseKeypair>, store: DeliveryStore) {
    let (reader, writer) = handshake_responder(stream, &keypair)
        .await
        .unwrap()
        .into_capnp_io();
    let network = Box::new(twoparty::VatNetwork::new(
        reader.compat(),
        writer.compat_write(),
        Side::Server,
        Default::default(),
    ));
    let service: delivery_service::Client = capnp_rpc::new_client(DeliveryService { store });
    // A shutdown error just means the peer hung up — irrelevant to the test.
    let _ = RpcSystem::new(network, Some(service.client)).await;
}
/// Dial the AS at `addr`, complete the Noise handshake as initiator, and
/// return a bootstrapped `authentication_service::Client`. The RPC system is
/// driven to completion by a spawned local task.
async fn connect_as(
    addr: std::net::SocketAddr,
    noise_keypair: &NoiseKeypair,
) -> anyhow::Result<authentication_service::Client> {
    let stream = TcpStream::connect(addr)
        .await
        .with_context(|| format!("could not connect to AS at {addr}"))?;
    let transport = handshake_initiator(stream, noise_keypair)
        .await
        .context("Noise handshake to AS failed")?;
    let (reader, writer) = transport.into_capnp_io();
    let vat = Box::new(twoparty::VatNetwork::new(
        reader.compat(),
        writer.compat_write(),
        Side::Client,
        Default::default(),
    ));
    let mut rpc_system = RpcSystem::new(vat, None);
    let client: authentication_service::Client = rpc_system.bootstrap(Side::Server);
    tokio::task::spawn_local(rpc_system);
    Ok(client)
}
/// Dial the DS at `addr`, complete the Noise handshake as initiator, and
/// return a bootstrapped `delivery_service::Client`. The RPC system is
/// driven to completion by a spawned local task.
async fn connect_ds(
    addr: std::net::SocketAddr,
    noise_keypair: &NoiseKeypair,
) -> anyhow::Result<delivery_service::Client> {
    let stream = TcpStream::connect(addr)
        .await
        .with_context(|| format!("could not connect to DS at {addr}"))?;
    let transport = handshake_initiator(stream, noise_keypair)
        .await
        .context("Noise handshake to DS failed")?;
    let (reader, writer) = transport.into_capnp_io();
    let vat = Box::new(twoparty::VatNetwork::new(
        reader.compat(),
        writer.compat_write(),
        Side::Client,
        Default::default(),
    ));
    let mut rpc_system = RpcSystem::new(vat, None);
    let client: delivery_service::Client = rpc_system.bootstrap(Side::Server);
    tokio::task::spawn_local(rpc_system);
    Ok(client)
}
/// Upload `package` under `identity_key` to the AS and verify that the
/// server's returned fingerprint matches a locally computed SHA-256 of the
/// same bytes.
async fn upload_key_package(
    as_client: &authentication_service::Client,
    identity_key: &[u8],
    package: &[u8],
) -> anyhow::Result<()> {
    let mut request = as_client.upload_key_package_request();
    {
        let mut builder = request.get();
        builder.set_identity_key(identity_key);
        builder.set_package(package);
    }
    let response = request
        .send()
        .promise
        .await
        .context("upload_key_package RPC failed")?;
    let server_fp = response
        .get()
        .context("upload_key_package: bad response")?
        .get_fingerprint()
        .context("upload_key_package: missing fingerprint")?
        .to_vec();
    // Recompute the digest locally so a corrupted upload is caught here.
    let expected: Vec<u8> = Sha256::digest(package).to_vec();
    anyhow::ensure!(server_fp == expected, "fingerprint mismatch");
    Ok(())
}
/// Fetch one KeyPackage for `identity_key` from the AS and return its raw
/// bytes.
async fn fetch_key_package(
    as_client: &authentication_service::Client,
    identity_key: &[u8],
) -> anyhow::Result<Vec<u8>> {
    let mut request = as_client.fetch_key_package_request();
    request.get().set_identity_key(identity_key);
    let response = request
        .send()
        .promise
        .await
        .context("fetch_key_package RPC failed")?;
    let package = response
        .get()
        .context("fetch_key_package: bad response")?
        .get_package()
        .context("fetch_key_package: missing package")?;
    Ok(package.to_vec())
}
/// Enqueue `payload` for `recipient_key` on the Delivery Service.
async fn enqueue(
    ds_client: &delivery_service::Client,
    recipient_key: &[u8],
    payload: &[u8],
) -> anyhow::Result<()> {
    let mut request = ds_client.enqueue_request();
    {
        let mut builder = request.get();
        builder.set_recipient_key(recipient_key);
        builder.set_payload(payload);
    }
    request
        .send()
        .promise
        .await
        .context("enqueue RPC failed")
        .map(|_| ())
}
/// Fetch every queued payload for `recipient_key` from the Delivery Service,
/// returned in queue order.
async fn fetch_all(
    ds_client: &delivery_service::Client,
    recipient_key: &[u8],
) -> anyhow::Result<Vec<Vec<u8>>> {
    let mut request = ds_client.fetch_request();
    request.get().set_recipient_key(recipient_key);
    let response = request.send().promise.await.context("fetch RPC failed")?;
    let list = response
        .get()
        .context("fetch: bad response")?
        .get_payloads()
        .context("fetch: missing payloads")?;
    // Collect fallibly: the first unreadable payload aborts with context.
    (0..list.len())
        .map(|i| {
            list.get(i)
                .map(|payload| payload.to_vec())
                .context("fetch: payload read failed")
        })
        .collect()
}
// ── Inline service implementations ─────────────────────────────────────────-
/// In-process Authentication Service used by the integration tests.
struct AuthService {
    // Shared map: identity key bytes → FIFO queue of uploaded KeyPackages.
    store: KeyPackageStore,
}
impl authentication_service::Server for AuthService {
fn upload_key_package(
&mut self,
params: authentication_service::UploadKeyPackageParams,
mut results: authentication_service::UploadKeyPackageResults,
) -> Promise<(), capnp::Error> {
let params = match params.get() {
Ok(p) => p,
Err(e) => return Promise::err(e),
};
let ik = match params.get_identity_key() {
Ok(v) => v.to_vec(),
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
};
let pkg = match params.get_package() {
Ok(v) => v.to_vec(),
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
};
let fp: Vec<u8> = Sha256::digest(&pkg).to_vec();
self.store.entry(ik).or_default().push_back(pkg);
results.get().set_fingerprint(&fp);
Promise::ok(())
}
fn fetch_key_package(
&mut self,
params: authentication_service::FetchKeyPackageParams,
mut results: authentication_service::FetchKeyPackageResults,
) -> Promise<(), capnp::Error> {
let ik = match params.get() {
Ok(p) => match p.get_identity_key() {
Ok(v) => v.to_vec(),
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
},
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
};
let pkg = self
.store
.get_mut(&ik)
.and_then(|mut q| q.pop_front())
.unwrap_or_default();
results.get().set_package(&pkg);
Promise::ok(())
}
}
/// In-process Delivery Service used by the integration tests.
struct DeliveryService {
    // Shared map: recipient key bytes → FIFO queue of opaque payloads.
    store: DeliveryStore,
}
impl delivery_service::Server for DeliveryService {
fn enqueue(
&mut self,
params: delivery_service::EnqueueParams,
_results: delivery_service::EnqueueResults,
) -> Promise<(), capnp::Error> {
let params = match params.get() {
Ok(p) => p,
Err(e) => return Promise::err(e),
};
let recipient = match params.get_recipient_key() {
Ok(v) => v.to_vec(),
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
};
let payload = match params.get_payload() {
Ok(v) => v.to_vec(),
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
};
self.store.entry(recipient).or_default().push_back(payload);
Promise::ok(())
}
fn fetch(
&mut self,
params: delivery_service::FetchParams,
mut results: delivery_service::FetchResults,
) -> Promise<(), capnp::Error> {
let recipient = match params.get() {
Ok(p) => match p.get_recipient_key() {
Ok(v) => v.to_vec(),
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
},
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
};
let messages: Vec<Vec<u8>> = self
.store
.get_mut(&recipient)
.map(|mut q| q.drain(..).collect())
.unwrap_or_default();
let mut list = results.get().init_payloads(messages.len() as u32);
for (i, msg) in messages.iter().enumerate() {
list.set(i as u32, msg);
}
Promise::ok(())
}
}

View File

@@ -13,8 +13,8 @@ use std::sync::Arc;
use tokio::net::TcpListener;
use noiseml_core::{NoiseKeypair, handshake_initiator, handshake_responder};
use noiseml_proto::{MsgType, ParsedEnvelope};
use quicnprotochat_core::{handshake_initiator, handshake_responder, NoiseKeypair};
use quicnprotochat_proto::{MsgType, ParsedEnvelope};
/// Completes a full Noise_XX handshake and Ping/Pong exchange, then verifies
/// mutual authentication by comparing observed vs. actual static public keys.
@@ -96,13 +96,10 @@ async fn noise_xx_ping_pong_round_trip() {
.await
.expect("client send_envelope failed");
let pong = tokio::time::timeout(
std::time::Duration::from_secs(5),
transport.recv_envelope(),
)
.await
.expect("timed out waiting for Pong — server task likely panicked")
.expect("client recv_envelope failed");
let pong = tokio::time::timeout(std::time::Duration::from_secs(5), transport.recv_envelope())
.await
.expect("timed out waiting for Pong — server task likely panicked")
.expect("client recv_envelope failed");
match pong.msg_type {
MsgType::Pong => {}
@@ -135,9 +132,7 @@ async fn noise_xx_ping_pong_round_trip() {
async fn two_sequential_connections_both_authenticate() {
let server_keypair = Arc::new(NoiseKeypair::generate());
let listener = TcpListener::bind("127.0.0.1:0")
.await
.expect("bind failed");
let listener = TcpListener::bind("127.0.0.1:0").await.expect("bind failed");
let server_addr = listener.local_addr().expect("local_addr failed");
let server_kp = Arc::clone(&server_keypair);
@@ -186,13 +181,10 @@ async fn two_sequential_connections_both_authenticate() {
.await
.expect("client send failed");
let pong = tokio::time::timeout(
std::time::Duration::from_secs(5),
t.recv_envelope(),
)
.await
.expect("timeout")
.expect("recv failed");
let pong = tokio::time::timeout(std::time::Duration::from_secs(5), t.recv_envelope())
.await
.expect("timeout")
.expect("recv failed");
match pong.msg_type {
MsgType::Pong => {}

View File

@@ -1,8 +1,8 @@
[package]
name = "noiseml-core"
name = "quicnprotochat-core"
version = "0.1.0"
edition = "2021"
description = "Crypto primitives, Noise_XX transport, MLS state machine, and Cap'n Proto frame codec for noiseml."
description = "Crypto primitives, TLS/QUIC transport, MLS state machine, and Cap'n Proto frame codec for quicnprotochat."
license = "MIT"
[dependencies]
@@ -20,10 +20,13 @@ openmls = { workspace = true }
openmls_rust_crypto = { workspace = true }
openmls_traits = { workspace = true }
tls_codec = { workspace = true }
serde = { workspace = true }
bincode = { workspace = true }
serde_json = { workspace = true }
# Serialisation
capnp = { workspace = true }
noiseml-proto = { path = "../noiseml-proto" }
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
# Async runtime + codec
tokio = { workspace = true }

View File

@@ -92,8 +92,7 @@ impl Decoder for LengthPrefixedCodec {
}
// Peek at the length without advancing — avoid mutating state on None.
let frame_len =
u32::from_le_bytes([src[0], src[1], src[2], src[3]]) as usize;
let frame_len = u32::from_le_bytes([src[0], src[1], src[2], src[3]]) as usize;
if frame_len > NOISE_MAX_MSG {
return Err(CodecError::FrameTooLarge {
@@ -139,7 +138,7 @@ mod tests {
#[test]
fn round_trip_small_payload() {
let payload = b"hello noiseml";
let payload = b"hello quicnprotochat";
let result = encode_then_decode(payload);
assert_eq!(&result[..], payload);
}

View File

@@ -1,4 +1,4 @@
//! Error types for `noiseml-core`.
//! Error types for `quicnprotochat-core`.
//!
//! Two separate error types are used to preserve type-level separation of concerns:
//!

View File

@@ -3,7 +3,7 @@
//! # Design
//!
//! [`GroupMember`] wraps an openmls [`MlsGroup`] plus the per-client
//! [`OpenMlsRustCrypto`] backend. The backend is **persistent** — it holds the
//! [`StoreCrypto`] backend. The backend is **persistent** — it holds the
//! in-memory key store that maps init-key references to HPKE private keys.
//! openmls's `new_from_welcome` reads those private keys from the key store to
//! decrypt the Welcome, so the same backend instance must be used from
@@ -28,20 +28,22 @@
use std::sync::Arc;
use openmls::prelude::{
Ciphersuite, CryptoConfig, Credential, CredentialType, CredentialWithKey,
GroupId, KeyPackage, KeyPackageIn, MlsGroup, MlsGroupConfig, MlsMessageInBody,
MlsMessageOut, ProcessedMessageContent, ProtocolMessage, ProtocolVersion,
TlsDeserializeTrait, TlsSerializeTrait,
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, GroupId, KeyPackage,
KeyPackageIn, MlsGroup, MlsGroupConfig, MlsMessageInBody, MlsMessageOut,
ProcessedMessageContent, ProtocolMessage, ProtocolVersion, TlsDeserializeTrait,
TlsSerializeTrait,
};
use openmls_rust_crypto::OpenMlsRustCrypto;
use openmls_traits::OpenMlsCryptoProvider;
use crate::{error::CoreError, identity::IdentityKeypair};
use crate::{
error::CoreError,
identity::IdentityKeypair,
keystore::{DiskKeyStore, StoreCrypto},
};
// ── Constants ─────────────────────────────────────────────────────────────────
const CIPHERSUITE: Ciphersuite =
Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
const CIPHERSUITE: Ciphersuite = Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
// ── GroupMember ───────────────────────────────────────────────────────────────
@@ -61,7 +63,7 @@ const CIPHERSUITE: Ciphersuite =
pub struct GroupMember {
/// Persistent crypto backend. Holds the in-memory key store with HPKE
/// private keys created during `generate_key_package`.
backend: OpenMlsRustCrypto,
backend: StoreCrypto,
/// Long-term Ed25519 identity keypair. Also used as the MLS `Signer`.
identity: Arc<IdentityKeypair>,
/// Active MLS group, if any.
@@ -73,16 +75,23 @@ pub struct GroupMember {
impl GroupMember {
/// Create a new `GroupMember` with a fresh crypto backend.
pub fn new(identity: Arc<IdentityKeypair>) -> Self {
Self::new_with_state(identity, DiskKeyStore::ephemeral(), None)
}
/// Create a `GroupMember` from pre-existing state (identity + optional group + store).
pub fn new_with_state(
identity: Arc<IdentityKeypair>,
key_store: DiskKeyStore,
group: Option<MlsGroup>,
) -> Self {
let config = MlsGroupConfig::builder()
// Embed the ratchet tree in Welcome messages so joinees do not
// need an out-of-band tree delivery mechanism.
.use_ratchet_tree_extension(true)
.build();
Self {
backend: OpenMlsRustCrypto::default(),
backend: StoreCrypto::new(key_store),
identity,
group: None,
group,
config,
}
}
@@ -195,11 +204,7 @@ impl GroupMember {
// Create the Commit + Welcome. The third return value (GroupInfo) is for
// external commits and is not needed here.
let (commit_out, welcome_out, _group_info) = group
.add_members(
&self.backend,
self.identity.as_ref(),
&[key_package],
)
.add_members(&self.backend, self.identity.as_ref(), &[key_package])
.map_err(|e| CoreError::Mls(format!("add_members: {e:?}")))?;
// Merge the pending Commit into our own state, advancing the epoch.
@@ -231,9 +236,8 @@ impl GroupMember {
/// [`generate_key_package`]: Self::generate_key_package
pub fn join_group(&mut self, welcome_bytes: &[u8]) -> Result<(), CoreError> {
// Deserialise MlsMessageIn, then extract the inner Welcome.
let msg_in =
openmls::prelude::MlsMessageIn::tls_deserialize(&mut welcome_bytes.as_ref())
.map_err(|e| CoreError::Mls(format!("Welcome deserialise: {e:?}")))?;
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut welcome_bytes.as_ref())
.map_err(|e| CoreError::Mls(format!("Welcome deserialise: {e:?}")))?;
// into_welcome() is feature-gated in openmls 0.5; extract() is public.
let welcome = match msg_in.extract() {
@@ -243,13 +247,8 @@ impl GroupMember {
// ratchet_tree = None because use_ratchet_tree_extension = true embeds
// the tree inside the Welcome's GroupInfo extension.
let group = MlsGroup::new_from_welcome(
&self.backend,
&self.config,
welcome,
None,
)
.map_err(|e| CoreError::Mls(format!("new_from_welcome: {e:?}")))?;
let group = MlsGroup::new_from_welcome(&self.backend, &self.config, welcome, None)
.map_err(|e| CoreError::Mls(format!("new_from_welcome: {e:?}")))?;
self.group = Some(group);
Ok(())
@@ -298,9 +297,8 @@ impl GroupMember {
.as_mut()
.ok_or_else(|| CoreError::Mls("no active group".into()))?;
let msg_in =
openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes.as_ref())
.map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;
let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes.as_ref())
.map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;
// into_protocol_message() is feature-gated; extract() + manual construction is not.
let protocol_message = match msg_in.extract() {
@@ -314,9 +312,7 @@ impl GroupMember {
.map_err(|e| CoreError::Mls(format!("process_message: {e:?}")))?;
match processed.into_content() {
ProcessedMessageContent::ApplicationMessage(app) => {
Ok(Some(app.into_bytes()))
}
ProcessedMessageContent::ApplicationMessage(app) => Ok(Some(app.into_bytes())),
ProcessedMessageContent::StagedCommitMessage(staged) => {
// Merge the Commit into the local state (epoch advances).
group
@@ -350,6 +346,21 @@ impl GroupMember {
&self.identity
}
/// Return the private seed of the identity (for persistence).
pub fn identity_seed(&self) -> [u8; 32] {
self.identity.seed_bytes()
}
/// Return a reference to the underlying crypto backend.
pub fn backend(&self) -> &StoreCrypto {
&self.backend
}
/// Return a reference to the MLS group, if active.
pub fn group_ref(&self) -> Option<&MlsGroup> {
self.group.as_ref()
}
// ── Private helpers ───────────────────────────────────────────────────────
fn make_credential_with_key(&self) -> Result<CredentialWithKey, CoreError> {
@@ -385,7 +396,9 @@ mod tests {
let bob_kp = bob.generate_key_package().expect("Bob KeyPackage");
// Alice creates the group.
alice.create_group(b"test-group-m3").expect("Alice create group");
alice
.create_group(b"test-group-m3")
.expect("Alice create group");
// Alice adds Bob → (commit, welcome).
// Alice is the sole existing member, so she merges the commit herself.

View File

@@ -22,6 +22,7 @@
use ed25519_dalek::{Signer as DalekSigner, SigningKey, VerifyingKey};
use openmls_traits::signatures::Signer;
use openmls_traits::types::{Error as MlsError, SignatureScheme};
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use zeroize::Zeroizing;
@@ -39,6 +40,23 @@ pub struct IdentityKeypair {
verifying: VerifyingKey,
}
impl IdentityKeypair {
/// Recreate an identity keypair from a 32-byte seed.
pub fn from_seed(seed: [u8; 32]) -> Self {
let signing = SigningKey::from_bytes(&seed);
let verifying = signing.verifying_key();
Self {
seed: Zeroizing::new(seed),
verifying,
}
}
/// Return the raw 32-byte private seed (for persistence).
pub fn seed_bytes(&self) -> [u8; 32] {
*self.seed
}
}
impl IdentityKeypair {
/// Generate a fresh random Ed25519 identity keypair.
pub fn generate() -> Self {
@@ -84,6 +102,29 @@ impl Signer for IdentityKeypair {
}
}
impl Serialize for IdentityKeypair {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_bytes(&self.seed[..])
}
}
impl<'de> Deserialize<'de> for IdentityKeypair {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let bytes: Vec<u8> = serde::Deserialize::deserialize(deserializer)?;
let seed: [u8; 32] = bytes
.as_slice()
.try_into()
.map_err(|_| serde::de::Error::custom("identity seed must be 32 bytes"))?;
Ok(IdentityKeypair::from_seed(seed))
}
}
impl std::fmt::Debug for IdentityKeypair {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let fp = self.fingerprint();

View File

@@ -14,7 +14,7 @@
//! # Wire format
//!
//! KeyPackages are TLS-encoded using `tls_codec` (same version as openmls).
//! The resulting bytes are opaque to the noiseml transport layer.
//! The resulting bytes are opaque to the quicnprotochat transport layer.
use openmls::prelude::{
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage,
@@ -25,9 +25,8 @@ use sha2::{Digest, Sha256};
use crate::{error::CoreError, identity::IdentityKeypair};
/// The MLS ciphersuite used throughout noiseml.
const CIPHERSUITE: Ciphersuite =
Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
/// The MLS ciphersuite used throughout quicnprotochat.
const CIPHERSUITE: Ciphersuite = Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
/// Generate a fresh MLS KeyPackage for `identity` and serialise it.
///
@@ -41,18 +40,13 @@ const CIPHERSUITE: Ciphersuite =
///
/// Returns [`CoreError::Mls`] if openmls fails to create the KeyPackage or if
/// TLS serialisation fails.
pub fn generate_key_package(
identity: &IdentityKeypair,
) -> Result<(Vec<u8>, Vec<u8>), CoreError> {
pub fn generate_key_package(identity: &IdentityKeypair) -> Result<(Vec<u8>, Vec<u8>), CoreError> {
let backend = OpenMlsRustCrypto::default();
// Build a BasicCredential using the raw Ed25519 public key bytes as the
// MLS identity. Per RFC 9420, any byte string may serve as the identity.
let credential = Credential::new(
identity.public_key_bytes().to_vec(),
CredentialType::Basic,
)
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
let credential = Credential::new(identity.public_key_bytes().to_vec(), CredentialType::Basic)
.map_err(|e| CoreError::Mls(format!("{e:?}")))?;
// The `signature_key` in CredentialWithKey is the Ed25519 public key that
// will be used to verify the KeyPackage's leaf node signature.

View File

@@ -113,7 +113,9 @@ mod tests {
let private = kp.private_bytes();
// We cannot observe zeroization after drop in a test without unsafe,
// but we can confirm the wrapper type is returned and is non-zero.
assert!(private.iter().any(|&b| b != 0),
"freshly generated private key should not be all zeros");
assert!(
private.iter().any(|&b| b != 0),
"freshly generated private key should not be all zeros"
);
}
}

View File

@@ -0,0 +1,144 @@
use std::{
collections::HashMap,
fs,
path::{Path, PathBuf},
sync::RwLock,
};
use openmls_rust_crypto::RustCrypto;
use openmls_traits::{
key_store::{MlsEntity, OpenMlsKeyStore},
OpenMlsCryptoProvider,
};
/// A disk-backed key store implementing `OpenMlsKeyStore`.
///
/// In-memory when `path` is `None`; otherwise flushes the entire map to disk on
/// every store/delete so HPKE init keys survive process restarts.
#[derive(Debug)]
pub struct DiskKeyStore {
    // Backing file for persistence; `None` means the store is purely in-memory.
    path: Option<PathBuf>,
    // Key → serialised-entity bytes, guarded for concurrent access.
    values: RwLock<HashMap<Vec<u8>, Vec<u8>>>,
}
/// Errors produced by [`DiskKeyStore`] operations.
#[derive(thiserror::Error, Debug, PartialEq, Eq)]
pub enum DiskKeyStoreError {
    /// A stored value could not be serialised or deserialised.
    #[error("serialization error")]
    Serialization,
    /// Reading or writing the backing file failed.
    #[error("io error: {0}")]
    Io(String),
}
impl DiskKeyStore {
    /// In-memory keystore (no persistence).
    pub fn ephemeral() -> Self {
        Self {
            path: None,
            values: RwLock::new(HashMap::new()),
        }
    }
    /// Persistent keystore backed by `path`. Creates an empty store if missing.
    ///
    /// # Errors
    ///
    /// Returns `Io` if the file cannot be read, or `Serialization` if its
    /// contents cannot be decoded with bincode.
    pub fn persistent(path: impl AsRef<Path>) -> Result<Self, DiskKeyStoreError> {
        let path = path.as_ref().to_path_buf();
        let values = if path.exists() {
            let bytes = fs::read(&path).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
            // A zero-byte file (e.g. created but never flushed) is treated as
            // an empty store rather than a deserialisation error.
            if bytes.is_empty() {
                HashMap::new()
            } else {
                bincode::deserialize(&bytes).map_err(|_| DiskKeyStoreError::Serialization)?
            }
        } else {
            HashMap::new()
        };
        Ok(Self {
            path: Some(path),
            values: RwLock::new(values),
        })
    }
    /// Write the entire map to the backing file, creating parent directories
    /// as needed. No-op for ephemeral (path-less) stores.
    fn flush(&self) -> Result<(), DiskKeyStoreError> {
        let Some(path) = &self.path else {
            return Ok(());
        };
        let values = self.values.read().unwrap();
        let bytes = bincode::serialize(&*values).map_err(|_| DiskKeyStoreError::Serialization)?;
        if let Some(parent) = path.parent() {
            fs::create_dir_all(parent).map_err(|e| DiskKeyStoreError::Io(e.to_string()))?;
        }
        fs::write(path, bytes).map_err(|e| DiskKeyStoreError::Io(e.to_string()))
    }
}
impl Default for DiskKeyStore {
    /// Defaults to an ephemeral (in-memory, non-persistent) store.
    fn default() -> Self {
        Self::ephemeral()
    }
}
impl OpenMlsKeyStore for DiskKeyStore {
    type Error = DiskKeyStoreError;
    /// Serialise `v` as JSON under key `k`, then persist the whole map.
    fn store<V: MlsEntity>(&self, k: &[u8], v: &V) -> Result<(), Self::Error> {
        let value = serde_json::to_vec(v).map_err(|_| DiskKeyStoreError::Serialization)?;
        let mut values = self.values.write().unwrap();
        values.insert(k.to_vec(), value);
        // Release the write lock before flush() takes its own read lock.
        drop(values);
        self.flush()
    }
    /// Look up `k` and deserialise; any missing key or decode failure yields
    /// `None` (the trait offers no error channel here).
    fn read<V: MlsEntity>(&self, k: &[u8]) -> Option<V> {
        let values = self.values.read().unwrap();
        values
            .get(k)
            .and_then(|bytes| serde_json::from_slice(bytes).ok())
    }
    /// Remove `k` (if present), then persist the whole map.
    fn delete<V: MlsEntity>(&self, k: &[u8]) -> Result<(), Self::Error> {
        let mut values = self.values.write().unwrap();
        values.remove(k);
        // Release the write lock before flush() takes its own read lock.
        drop(values);
        self.flush()
    }
}
/// Crypto provider that couples RustCrypto with a disk-backed key store.
#[derive(Debug)]
pub struct StoreCrypto {
    // Stateless crypto/randomness provider (also serves as RandProvider).
    crypto: RustCrypto,
    // Key store that may persist entries to disk across restarts.
    key_store: DiskKeyStore,
}
impl StoreCrypto {
    /// Build a provider around the given key store with a default RustCrypto
    /// backend.
    pub fn new(key_store: DiskKeyStore) -> Self {
        Self {
            crypto: RustCrypto::default(),
            key_store,
        }
    }
}
impl Default for StoreCrypto {
    /// Defaults to an ephemeral (in-memory) key store.
    fn default() -> Self {
        Self::new(DiskKeyStore::ephemeral())
    }
}
impl OpenMlsCryptoProvider for StoreCrypto {
    type CryptoProvider = RustCrypto;
    type RandProvider = RustCrypto;
    type KeyStoreProvider = DiskKeyStore;
    fn crypto(&self) -> &Self::CryptoProvider {
        &self.crypto
    }
    // The same RustCrypto instance doubles as the randomness source.
    fn rand(&self) -> &Self::RandProvider {
        &self.crypto
    }
    fn key_store(&self) -> &Self::KeyStoreProvider {
        &self.key_store
    }
}

View File

@@ -1,5 +1,5 @@
//! Core cryptographic primitives, Noise_XX transport, MLS group state machine,
//! and frame codec for noiseml.
//! and frame codec for quicnprotochat.
//!
//! # Module layout
//!
@@ -17,8 +17,9 @@ mod codec;
mod error;
mod group;
mod identity;
mod keypair;
mod keypackage;
mod keypair;
mod keystore;
mod noise;
// ── Public API ────────────────────────────────────────────────────────────────
@@ -27,6 +28,7 @@ pub use codec::{LengthPrefixedCodec, NOISE_MAX_MSG};
pub use error::{CodecError, CoreError, MAX_PLAINTEXT_LEN};
pub use group::GroupMember;
pub use identity::IdentityKeypair;
pub use keypair::NoiseKeypair;
pub use keypackage::generate_key_package;
pub use keypair::NoiseKeypair;
pub use keystore::DiskKeyStore;
pub use noise::{handshake_initiator, handshake_responder, NoiseTransport};

View File

@@ -32,7 +32,7 @@
use bytes::Bytes;
use futures::{SinkExt, StreamExt};
use tokio::{
io::{AsyncReadExt, AsyncWriteExt, DuplexStream, ReadHalf, WriteHalf, duplex},
io::{duplex, AsyncReadExt, AsyncWriteExt, DuplexStream, ReadHalf, WriteHalf},
net::TcpStream,
};
use tokio_util::codec::Framed;
@@ -42,9 +42,9 @@ use crate::{
error::{CoreError, MAX_PLAINTEXT_LEN},
keypair::NoiseKeypair,
};
use noiseml_proto::{parse_envelope, build_envelope, ParsedEnvelope};
use quicnprotochat_proto::{build_envelope, parse_envelope, ParsedEnvelope};
/// Noise parameters used throughout noiseml.
/// Noise parameters used throughout quicnprotochat.
///
/// `Noise_XX_25519_ChaChaPoly_BLAKE2s` — both parties authenticate each
/// other's static X25519 keys; ChaCha20-Poly1305 for AEAD; BLAKE2s as PRF.
@@ -144,7 +144,7 @@ impl NoiseTransport {
/// Serialise and encrypt a [`ParsedEnvelope`], then send it.
///
/// This is the primary application-level send method. The Cap'n Proto
/// encoding is done by [`noiseml_proto::build_envelope`] before encryption.
/// encoding is done by [`quicnprotochat_proto::build_envelope`] before encryption.
pub async fn send_envelope(&mut self, env: &ParsedEnvelope) -> Result<(), CoreError> {
let bytes = build_envelope(env).map_err(CoreError::Capnp)?;
self.send_frame(&bytes).await
@@ -244,9 +244,10 @@ impl NoiseTransport {
impl std::fmt::Debug for NoiseTransport {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let remote = self.remote_static.as_deref().map(|k| {
format!("{:02x}{:02x}{:02x}{:02x}", k[0], k[1], k[2], k[3])
});
let remote = self
.remote_static
.as_deref()
.map(|k| format!("{:02x}{:02x}{:02x}{:02x}", k[0], k[1], k[2], k[3]));
f.debug_struct("NoiseTransport")
.field("remote_static", &remote)
.finish_non_exhaustive()
@@ -270,9 +271,9 @@ pub async fn handshake_initiator(
stream: TcpStream,
keypair: &NoiseKeypair,
) -> Result<NoiseTransport, CoreError> {
let params: snow::params::NoiseParams = NOISE_PARAMS.parse().expect(
"NOISE_PARAMS is a compile-time constant and must parse successfully",
);
let params: snow::params::NoiseParams = NOISE_PARAMS
.parse()
.expect("NOISE_PARAMS is a compile-time constant and must parse successfully");
// The private key bytes are held in a Zeroizing wrapper and cleared after
// snow clones them internally during build_initiator().
@@ -337,9 +338,9 @@ pub async fn handshake_responder(
stream: TcpStream,
keypair: &NoiseKeypair,
) -> Result<NoiseTransport, CoreError> {
let params: snow::params::NoiseParams = NOISE_PARAMS.parse().expect(
"NOISE_PARAMS is a compile-time constant and must parse successfully",
);
let params: snow::params::NoiseParams = NOISE_PARAMS
.parse()
.expect("NOISE_PARAMS is a compile-time constant and must parse successfully");
let private = keypair.private_bytes();
let mut session = snow::Builder::new(params)

View File

@@ -1,8 +1,8 @@
[package]
name = "noiseml-proto"
name = "quicnprotochat-proto"
version = "0.1.0"
edition = "2021"
description = "Cap'n Proto schemas, generated types, and serialisation helpers for noiseml. No crypto, no I/O."
description = "Cap'n Proto schemas, generated types, and serialisation helpers for quicnprotochat. No crypto, no I/O."
license = "MIT"
# build.rs invokes capnpc to generate Rust source from .capnp schemas.

View File

@@ -1,4 +1,4 @@
//! Build script for noiseml-proto.
//! Build script for quicnprotochat-proto.
//!
//! Invokes the `capnp` compiler to generate Rust types from `.capnp` schemas
//! located in the workspace-root `schemas/` directory.
@@ -14,11 +14,10 @@
use std::{env, path::PathBuf};
fn main() {
let manifest_dir = PathBuf::from(
env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set by Cargo"),
);
let manifest_dir =
PathBuf::from(env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set by Cargo"));
// Workspace root is two levels above this crate (noiseml/crates/noiseml-proto).
// Workspace root is two levels above this crate (quicnprotochat/crates/quicnprotochat-proto).
let workspace_root = manifest_dir
.join("../..")
.canonicalize()
@@ -39,6 +38,10 @@ fn main() {
"cargo:rerun-if-changed={}",
schemas_dir.join("delivery.capnp").display()
);
println!(
"cargo:rerun-if-changed={}",
schemas_dir.join("node.capnp").display()
);
capnpc::CompilerCommand::new()
// Treat `schemas/` as the include root so that inter-schema imports
@@ -47,6 +50,7 @@ fn main() {
.file(schemas_dir.join("envelope.capnp"))
.file(schemas_dir.join("auth.capnp"))
.file(schemas_dir.join("delivery.capnp"))
.file(schemas_dir.join("node.capnp"))
.run()
.expect(
"Cap'n Proto schema compilation failed. \

View File

@@ -1,4 +1,4 @@
//! Cap'n Proto schemas, generated types, and serialisation helpers for noiseml.
//! Cap'n Proto schemas, generated types, and serialisation helpers for quicnprotochat.
//!
//! # Design constraints
//!
@@ -41,11 +41,18 @@ pub mod delivery_capnp {
include!(concat!(env!("OUT_DIR"), "/delivery_capnp.rs"));
}
/// Cap'n Proto generated types for `schemas/node.capnp`.
///
/// Do not edit this module by hand — it is entirely machine-generated.
pub mod node_capnp {
include!(concat!(env!("OUT_DIR"), "/node_capnp.rs"));
}
// ── Re-exports ────────────────────────────────────────────────────────────────
/// The message-type discriminant from the `Envelope` schema.
///
/// Re-exported here so callers can `use noiseml_proto::MsgType` without
/// Re-exported here so callers can `use quicnprotochat_proto::MsgType` without
/// spelling out the full generated module path.
pub use envelope_capnp::envelope::MsgType;
@@ -80,7 +87,7 @@ pub struct ParsedEnvelope {
///
/// The returned bytes include the Cap'n Proto segment table header followed by
/// the message data. They are suitable for use as the body of a length-prefixed
/// noiseml frame (the frame codec in `noiseml-core` prepends the 4-byte length).
/// quicnprotochat frame (the frame codec in `quicnprotochat-core` prepends the 4-byte length).
///
/// # Errors
///
@@ -135,7 +142,7 @@ pub fn parse_envelope(bytes: &[u8]) -> Result<ParsedEnvelope, capnp::Error> {
/// Serialise a Cap'n Proto message builder to unpacked wire bytes.
///
/// The output includes the segment table header. For transport, the
/// `noiseml-core` frame codec prepends a 4-byte little-endian length field.
/// `quicnprotochat-core` frame codec prepends a 4-byte little-endian length field.
pub fn to_bytes<A: capnp::message::Allocator>(
msg: &capnp::message::Builder<A>,
) -> Result<Vec<u8>, capnp::Error> {

View File

@@ -1,17 +1,17 @@
[package]
name = "noiseml-server"
name = "quicnprotochat-server"
version = "0.1.0"
edition = "2021"
description = "Delivery Service and Authentication Service for noiseml."
description = "Delivery Service and Authentication Service for quicnprotochat."
license = "MIT"
[[bin]]
name = "noiseml-server"
name = "quicnprotochat-server"
path = "src/main.rs"
[dependencies]
noiseml-core = { path = "../noiseml-core" }
noiseml-proto = { path = "../noiseml-proto" }
quicnprotochat-core = { path = "../quicnprotochat-core" }
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
# Serialisation + RPC
capnp = { workspace = true }
@@ -27,10 +27,16 @@ dashmap = { workspace = true }
sha2 = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
quinn = { workspace = true }
quinn-proto = { workspace = true }
rustls = { workspace = true }
rcgen = { workspace = true }
# Error handling
anyhow = { workspace = true }
thiserror = { workspace = true }
bincode = { workspace = true }
serde = { workspace = true }
# CLI
clap = { workspace = true }

View File

@@ -0,0 +1,508 @@
//! quicnprotochat-server — unified Authentication + Delivery service.
//!
//! # M3 scope
//!
//! The server exposes a single QUIC + TLS 1.3 Cap'n Proto RPC endpoint
//! (`NodeService`) combining Authentication and Delivery operations.
//!
//! # Architecture
//!
//! ```text
//! QUIC endpoint (7000)
//! └─ TLS 1.3 handshake (self-signed by default)
//! └─ capnp-rpc VatNetwork (LocalSet, !Send)
//! └─ NodeServiceImpl (KeyPackage + Delivery queues)
//! ```
//!
//! Because `capnp-rpc` uses `Rc<RefCell<>>` internally it is `!Send`.
//! The entire RPC stack lives on a `tokio::task::LocalSet` spawned per
//! connection.
//!
//! # Configuration
//!
//! | Env var                     | CLI flag     | Default                |
//! |-----------------------------|--------------|------------------------|
//! | `QUICNPROTOCHAT_LISTEN`     | `--listen`   | `0.0.0.0:7000`         |
//! | `QUICNPROTOCHAT_DATA_DIR`   | `--data-dir` | `data`                 |
//! | `QUICNPROTOCHAT_TLS_CERT`   | `--tls-cert` | `data/server-cert.der` |
//! | `QUICNPROTOCHAT_TLS_KEY`    | `--tls-key`  | `data/server-key.der`  |
//! | `RUST_LOG`                  | —            | `info`                 |
use std::{
    fs,
    net::SocketAddr,
    path::{Path, PathBuf},
    sync::Arc,
    time::Duration,
};

use anyhow::Context;
use capnp::capability::Promise;
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
use clap::Parser;
use dashmap::DashMap;
use quicnprotochat_proto::node_capnp::node_service;
use quinn::{Endpoint, ServerConfig};
use quinn_proto::crypto::rustls::QuicServerConfig;
use rcgen::generate_simple_self_signed;
use rustls::pki_types::{CertificateDer, PrivateKeyDer};
use sha2::{Digest, Sha256};
use tokio::sync::Notify;
use tokio::time::timeout;
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
mod storage;
use storage::{FileBackedStore, StorageError};
// ── CLI ───────────────────────────────────────────────────────────────────────
/// Command-line / environment configuration for the server binary.
///
/// Each flag can also be supplied via the environment variable named in its
/// `env = …` attribute; per clap's precedence, an explicit CLI flag wins over
/// the environment, which wins over the default.
#[derive(Debug, Parser)]
#[command(
    name = "quicnprotochat-server",
    about = "quicnprotochat Delivery Service + Authentication Service",
    version
)]
struct Args {
    /// QUIC listen address (host:port).
    #[arg(long, default_value = "0.0.0.0:7000", env = "QUICNPROTOCHAT_LISTEN")]
    listen: String,
    /// Directory for persisted server data (KeyPackages + delivery queues).
    #[arg(long, default_value = "data", env = "QUICNPROTOCHAT_DATA_DIR")]
    data_dir: String,
    /// TLS certificate path (generated automatically if missing).
    #[arg(
        long,
        default_value = "data/server-cert.der",
        env = "QUICNPROTOCHAT_TLS_CERT"
    )]
    tls_cert: PathBuf,
    /// TLS private key path (generated automatically if missing).
    #[arg(
        long,
        default_value = "data/server-key.der",
        env = "QUICNPROTOCHAT_TLS_KEY"
    )]
    tls_key: PathBuf,
}
// ── Node service implementation ─────────────────────────────────────────────
/// Cap'n Proto RPC server implementation for `NodeService` (Auth + Delivery).
struct NodeServiceImpl {
store: Arc<FileBackedStore>,
waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
}
impl NodeServiceImpl {
fn waiter(&self, recipient_key: &[u8]) -> Arc<Notify> {
self.waiters
.entry(recipient_key.to_vec())
.or_insert_with(|| Arc::new(Notify::new()))
.clone()
}
}
impl node_service::Server for NodeServiceImpl {
/// Upload a single-use KeyPackage and return its SHA-256 fingerprint.
fn upload_key_package(
&mut self,
params: node_service::UploadKeyPackageParams,
mut results: node_service::UploadKeyPackageResults,
) -> Promise<(), capnp::Error> {
let params = params
.get()
.map_err(|e| capnp::Error::failed(format!("upload_key_package: bad params: {e}")));
let (identity_key, package) = match params {
Ok(p) => {
let ik = match p.get_identity_key() {
Ok(v) => v.to_vec(),
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
};
let pkg = match p.get_package() {
Ok(v) => v.to_vec(),
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
};
(ik, pkg)
}
Err(e) => return Promise::err(e),
};
if identity_key.len() != 32 {
return Promise::err(capnp::Error::failed(format!(
"identityKey must be exactly 32 bytes, got {}",
identity_key.len()
)));
}
if package.is_empty() {
return Promise::err(capnp::Error::failed(
"package must not be empty".to_string(),
));
}
let fingerprint: Vec<u8> = Sha256::digest(&package).to_vec();
if let Err(e) = self
.store
.upload_key_package(&identity_key, package)
.map_err(storage_err)
{
return Promise::err(e);
}
results.get().set_fingerprint(&fingerprint);
tracing::debug!(
fingerprint = %fmt_hex(&fingerprint[..4]),
"KeyPackage uploaded"
);
Promise::ok(())
}
/// Atomically remove and return one KeyPackage for the given identity key.
fn fetch_key_package(
&mut self,
params: node_service::FetchKeyPackageParams,
mut results: node_service::FetchKeyPackageResults,
) -> Promise<(), capnp::Error> {
let identity_key = match params.get() {
Ok(p) => match p.get_identity_key() {
Ok(v) => v.to_vec(),
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
},
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
};
if identity_key.len() != 32 {
return Promise::err(capnp::Error::failed(format!(
"identityKey must be exactly 32 bytes, got {}",
identity_key.len()
)));
}
let package = match self
.store
.fetch_key_package(&identity_key)
.map_err(storage_err)
{
Ok(p) => p,
Err(e) => return Promise::err(e),
};
match package {
Some(pkg) => {
tracing::debug!(
identity = %fmt_hex(&identity_key[..4]),
"KeyPackage fetched"
);
results.get().set_package(&pkg);
}
None => {
tracing::debug!(
identity = %fmt_hex(&identity_key[..4]),
"no KeyPackage available for identity"
);
results.get().set_package(&[]);
}
}
Promise::ok(())
}
/// Append `payload` to the queue for `recipient_key`.
fn enqueue(
&mut self,
params: node_service::EnqueueParams,
_results: node_service::EnqueueResults,
) -> Promise<(), capnp::Error> {
let p = match params.get() {
Ok(p) => p,
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
};
let recipient_key = match p.get_recipient_key() {
Ok(v) => v.to_vec(),
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
};
let payload = match p.get_payload() {
Ok(v) => v.to_vec(),
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
};
if recipient_key.len() != 32 {
return Promise::err(capnp::Error::failed(format!(
"recipientKey must be exactly 32 bytes, got {}",
recipient_key.len()
)));
}
if payload.is_empty() {
return Promise::err(capnp::Error::failed(
"payload must not be empty".to_string(),
));
}
if let Err(e) = self
.store
.enqueue(&recipient_key, payload)
.map_err(storage_err)
{
return Promise::err(e);
}
self.waiter(&recipient_key).notify_waiters();
tracing::debug!(
recipient = %fmt_hex(&recipient_key[..4]),
"message enqueued"
);
Promise::ok(())
}
/// Atomically drain and return all queued payloads for `recipient_key`.
fn fetch(
&mut self,
params: node_service::FetchParams,
mut results: node_service::FetchResults,
) -> Promise<(), capnp::Error> {
let recipient_key = match params.get() {
Ok(p) => match p.get_recipient_key() {
Ok(v) => v.to_vec(),
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
},
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
};
if recipient_key.len() != 32 {
return Promise::err(capnp::Error::failed(format!(
"recipientKey must be exactly 32 bytes, got {}",
recipient_key.len()
)));
}
let messages = match self.store.fetch(&recipient_key).map_err(storage_err) {
Ok(m) => m,
Err(e) => return Promise::err(e),
};
tracing::debug!(
recipient = %fmt_hex(&recipient_key[..4]),
count = messages.len(),
"messages fetched"
);
let mut list = results.get().init_payloads(messages.len() as u32);
for (i, msg) in messages.iter().enumerate() {
list.set(i as u32, msg);
}
Promise::ok(())
}
/// Long-polling fetch with timeout (ms).
fn fetch_wait(
&mut self,
params: node_service::FetchWaitParams,
mut results: node_service::FetchWaitResults,
) -> Promise<(), capnp::Error> {
let p = match params.get() {
Ok(p) => p,
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
};
let recipient_key = match p.get_recipient_key() {
Ok(v) => v.to_vec(),
Err(e) => return Promise::err(capnp::Error::failed(format!("{e}"))),
};
let timeout_ms = p.get_timeout_ms();
if recipient_key.len() != 32 {
return Promise::err(capnp::Error::failed(format!(
"recipientKey must be exactly 32 bytes, got {}",
recipient_key.len()
)));
}
let store = Arc::clone(&self.store);
let waiters = self.waiters.clone();
Promise::from_future(async move {
let messages = store.fetch(&recipient_key).map_err(storage_err)?;
if messages.is_empty() && timeout_ms > 0 {
let waiter = waiters
.entry(recipient_key.clone())
.or_insert_with(|| Arc::new(Notify::new()))
.clone();
let _ = timeout(Duration::from_millis(timeout_ms), waiter.notified()).await;
let msgs = store.fetch(&recipient_key).map_err(storage_err)?;
fill_payloads_wait(&mut results, msgs);
return Ok(());
}
fill_payloads_wait(&mut results, messages);
Ok(())
})
}
fn health(
&mut self,
_params: node_service::HealthParams,
mut results: node_service::HealthResults,
) -> Promise<(), capnp::Error> {
results.get().set_status("ok");
Promise::ok(())
}
}
/// Copy `messages` into the `payloads` list of a `fetchWait` result struct.
fn fill_payloads_wait(results: &mut node_service::FetchWaitResults, messages: Vec<Vec<u8>>) {
    let count = messages.len() as u32;
    let mut list = results.get().init_payloads(count);
    for (slot, payload) in (0u32..).zip(messages.iter()) {
        list.set(slot, payload);
    }
}
fn storage_err(err: StorageError) -> capnp::Error {
capnp::Error::failed(format!("{err}"))
}
// ── Entry point ───────────────────────────────────────────────────────────────
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Honour RUST_LOG when set; fall back to info-level logging otherwise.
    tracing_subscriber::fmt()
        .with_env_filter(
            tracing_subscriber::EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")),
        )
        .init();

    let args = Args::parse();
    let listen: SocketAddr = args.listen.parse().context("--listen must be host:port")?;
    let server_config = build_server_config(&args.tls_cert, &args.tls_key)
        .context("failed to build TLS/QUIC server config")?;

    // Shared storage — persisted to disk for restart safety.
    let store = Arc::new(FileBackedStore::open(&args.data_dir)?);
    let waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>> = Arc::new(DashMap::new());

    let endpoint = Endpoint::server(server_config, listen)?;
    tracing::info!(
        addr = %args.listen,
        "accepting QUIC connections"
    );

    // capnp-rpc is !Send (Rc internals), so all RPC tasks must stay on a
    // LocalSet. Both accept loops share one LocalSet.
    let local = tokio::task::LocalSet::new();
    local
        .run_until(async move {
            // `accept()` yields None once the endpoint shuts down.
            while let Some(incoming) = endpoint.accept().await {
                let connecting = match incoming.accept() {
                    Ok(c) => c,
                    Err(e) => {
                        tracing::warn!(error = %e, "failed to accept incoming connection");
                        continue;
                    }
                };
                // Per-connection clones of the shared state for the task.
                let store = Arc::clone(&store);
                let waiters = Arc::clone(&waiters);
                tokio::task::spawn_local(async move {
                    if let Err(e) = handle_node_connection(connecting, store, waiters).await {
                        tracing::warn!(error = %e, "connection error");
                    }
                });
            }
            Ok::<(), anyhow::Error>(())
        })
        .await
}
// ── Per-connection handlers ───────────────────────────────────────────────────
/// Drive one NodeService client connection to completion.
///
/// Finishes the QUIC handshake, accepts the first bidirectional stream and
/// runs a two-party capnp-rpc system over it until the peer disconnects.
async fn handle_node_connection(
    connecting: quinn::Connecting,
    store: Arc<FileBackedStore>,
    waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
) -> Result<(), anyhow::Error> {
    let connection = connecting.await?;
    tracing::info!(peer = %connection.remote_address(), "QUIC connected");

    let (send, recv) = connection
        .accept_bi()
        .await
        .map_err(|e| anyhow::anyhow!("failed to accept bi stream: {e}"))?;

    // capnp-rpc works over generic AsyncRead/AsyncWrite; adapt the quinn
    // stream halves through the tokio-util compat layer.
    let reader = recv.compat();
    let writer = send.compat_write();
    let network = twoparty::VatNetwork::new(reader, writer, Side::Server, Default::default());
    let service: node_service::Client = capnp_rpc::new_client(NodeServiceImpl { store, waiters });

    RpcSystem::new(Box::new(network), Some(service.client))
        .await
        .map_err(|e| anyhow::anyhow!("NodeService RPC error: {e}"))
}
// ── Helpers ───────────────────────────────────────────────────────────────────
/// Format a byte slice as lowercase hex with a trailing `…`.
///
/// Callers pass a truncated prefix (e.g. `&key[..4]`); the ellipsis signals
/// in log output that the value shown is not the full key. The previous body
/// built the hex string and then re-wrapped it in a no-op `format!`, so the
/// documented ellipsis was never emitted.
fn fmt_hex(bytes: &[u8]) -> String {
    let hex: String = bytes.iter().map(|b| format!("{b:02x}")).collect();
    format!("{hex}…")
}
/// Ensure a self-signed certificate exists on disk and return a QUIC server config.
///
/// The DER cert/key pair is generated on first run and reused on subsequent
/// runs, so the server identity stays stable across restarts.
fn build_server_config(cert_path: &PathBuf, key_path: &PathBuf) -> anyhow::Result<ServerConfig> {
    // Bootstrap the certificate material if either half is missing.
    if !cert_path.exists() || !key_path.exists() {
        generate_self_signed(cert_path, key_path)?;
    }
    let cert_bytes = fs::read(cert_path).context("read cert")?;
    let key_bytes = fs::read(key_path).context("read key")?;
    let cert_chain = vec![CertificateDer::from(cert_bytes)];
    let key = PrivateKeyDer::try_from(key_bytes).map_err(|_| anyhow::anyhow!("invalid key"))?;
    let mut tls = rustls::ServerConfig::builder()
        .with_no_client_auth()
        .with_single_cert(cert_chain, key)?;
    // NOTE(review): clients are presumably expected to offer the same "capnp"
    // ALPN token during the TLS handshake — confirm against the client code.
    tls.alpn_protocols = vec![b"capnp".to_vec()];
    let crypto = QuicServerConfig::try_from(tls)
        .map_err(|e| anyhow::anyhow!("invalid server TLS config: {e}"))?;
    Ok(ServerConfig::with_crypto(Arc::new(crypto)))
}
fn generate_self_signed(cert_path: &PathBuf, key_path: &PathBuf) -> anyhow::Result<()> {
if let Some(parent) = cert_path.parent() {
fs::create_dir_all(parent).context("create cert dir")?;
}
if let Some(parent) = key_path.parent() {
fs::create_dir_all(parent).context("create key dir")?;
}
let subject_alt_names = vec![
"localhost".to_string(),
"127.0.0.1".to_string(),
"::1".to_string(),
];
let issued = generate_simple_self_signed(subject_alt_names)?;
let key_der = issued.key_pair.serialize_der();
fs::write(cert_path, issued.cert.der()).context("write cert")?;
fs::write(key_path, &key_der).context("write key")?;
tracing::info!(
cert = %cert_path.display(),
key = %key_path.display(),
"generated self-signed TLS certificate"
);
Ok(())
}

View File

@@ -0,0 +1,114 @@
use std::{
collections::{HashMap, VecDeque},
fs,
path::{Path, PathBuf},
sync::Mutex,
};
use serde::{Deserialize, Serialize};
/// Errors surfaced by `FileBackedStore` operations.
#[derive(thiserror::Error, Debug)]
pub enum StorageError {
    /// Underlying filesystem failure; the original `io::Error` message is
    /// preserved as a string.
    #[error("io error: {0}")]
    Io(String),
    /// bincode (de)serialization of the persisted queue map failed.
    #[error("serialization error")]
    Serde,
}
/// On-disk representation of one store file: a map from key bytes to a FIFO
/// of payloads, serialized with bincode.
#[derive(Serialize, Deserialize, Default)]
struct QueueMap {
    map: HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
}
/// File-backed storage for KeyPackages and delivery queues.
///
/// Each mutation flushes the entire map to disk. Suitable for MVP-scale loads.
pub struct FileBackedStore {
    // Path of the persisted KeyPackage map file.
    kp_path: PathBuf,
    // Path of the persisted delivery-queue map file.
    ds_path: PathBuf,
    // identity key -> FIFO of single-use KeyPackages.
    key_packages: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
    // recipient key -> FIFO of pending message payloads.
    deliveries: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
}
impl FileBackedStore {
    /// Open (or create) a store rooted at `dir`, loading any persisted state.
    pub fn open(dir: impl AsRef<Path>) -> Result<Self, StorageError> {
        let dir = dir.as_ref();
        if !dir.exists() {
            fs::create_dir_all(dir).map_err(|e| StorageError::Io(e.to_string()))?;
        }
        let kp_path = dir.join("keypackages.bin");
        let ds_path = dir.join("deliveries.bin");
        let key_packages = Mutex::new(Self::load_map(&kp_path)?);
        let deliveries = Mutex::new(Self::load_map(&ds_path)?);
        Ok(Self {
            kp_path,
            ds_path,
            key_packages,
            deliveries,
        })
    }

    /// Append a single-use KeyPackage to `identity_key`'s queue and persist.
    pub fn upload_key_package(
        &self,
        identity_key: &[u8],
        package: Vec<u8>,
    ) -> Result<(), StorageError> {
        let mut map = self.key_packages.lock().unwrap();
        map.entry(identity_key.to_vec())
            .or_default()
            .push_back(package);
        self.flush_map(&self.kp_path, &map)
    }

    /// Pop one KeyPackage for `identity_key`; `None` when the queue is empty.
    pub fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
        let mut map = self.key_packages.lock().unwrap();
        let package = map.get_mut(identity_key).and_then(|q| q.pop_front());
        // Only rewrite the file when something was actually removed; the old
        // code flushed the full map to disk even on a miss.
        if package.is_some() {
            self.flush_map(&self.kp_path, &map)?;
        }
        Ok(package)
    }

    /// Append `payload` to `recipient_key`'s delivery queue and persist.
    pub fn enqueue(&self, recipient_key: &[u8], payload: Vec<u8>) -> Result<(), StorageError> {
        let mut map = self.deliveries.lock().unwrap();
        map.entry(recipient_key.to_vec())
            .or_default()
            .push_back(payload);
        self.flush_map(&self.ds_path, &map)
    }

    /// Drain and return every queued payload for `recipient_key`.
    pub fn fetch(&self, recipient_key: &[u8]) -> Result<Vec<Vec<u8>>, StorageError> {
        let mut map = self.deliveries.lock().unwrap();
        let messages: Vec<Vec<u8>> = map
            .get_mut(recipient_key)
            .map(|q| q.drain(..).collect())
            .unwrap_or_default();
        // Skip the disk round-trip when the queue was already empty — fetch
        // is the hot path for polling clients.
        if !messages.is_empty() {
            self.flush_map(&self.ds_path, &map)?;
        }
        Ok(messages)
    }

    /// Load a persisted queue map; missing or empty files yield an empty map.
    fn load_map(path: &Path) -> Result<HashMap<Vec<u8>, VecDeque<Vec<u8>>>, StorageError> {
        if !path.exists() {
            return Ok(HashMap::new());
        }
        let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
        if bytes.is_empty() {
            return Ok(HashMap::new());
        }
        let map: QueueMap = bincode::deserialize(&bytes).map_err(|_| StorageError::Serde)?;
        Ok(map.map)
    }

    /// Persist `map` to `path` atomically: serialize into a sibling temp file
    /// first, then rename it over the target, so a crash mid-write can never
    /// leave a torn store file behind (rename on the same filesystem replaces
    /// the destination atomically on POSIX).
    fn flush_map(
        &self,
        path: &Path,
        map: &HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
    ) -> Result<(), StorageError> {
        let payload = QueueMap { map: map.clone() };
        let bytes = bincode::serialize(&payload).map_err(|_| StorageError::Serde)?;
        if let Some(parent) = path.parent() {
            fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
        }
        let tmp = path.with_extension("bin.tmp");
        fs::write(&tmp, &bytes).map_err(|e| StorageError::Io(e.to_string()))?;
        fs::rename(&tmp, path).map_err(|e| StorageError::Io(e.to_string()))
    }
}