feat: add delivery sequence numbers + major server/client refactor
Delivery sequence numbers (MLS epoch ordering fix):
- schemas/node.capnp: add Envelope{seq,data} struct; enqueue returns seq:UInt64;
fetch/fetchWait return List(Envelope) instead of List(Data)
- storage.rs: Store trait enqueue returns u64; fetch/fetch_limited return
Vec<(u64, Vec<u8>)>; FileBackedStore gains QueueMapV3 with per-inbox seq
counters and V2→V3 on-disk migration
- migrations/002_add_seq.sql: seq column, delivery_seq_counters table, index
- sql_store.rs: atomic UPSERT counter via RETURNING, ORDER BY seq, SCHEMA_VERSION→3
- node_service/delivery.rs: builds Envelope list; returns seq from enqueue
- client/rpc.rs: enqueue→u64, fetch_all/fetch_wait→Vec<(u64,Vec<u8>)>
- client/commands.rs: sort-by-seq before MLS processing; retry loop in cmd_recv
and receive_pending_plaintexts for correct epoch ordering
Server refactor:
- Split monolithic main.rs into node_service/{mod,delivery,auth_ops,key_ops,p2p_ops}
- Add auth.rs (token validation, rate limiting), config.rs, metrics.rs, tls.rs
- Add SQL migrations runner (001_initial.sql, 002_add_seq.sql)
- OPAQUE PAKE login/registration, sealed-sender mode, queue depth limit (1000)
Client refactor:
- Split lib.rs into client/{commands,rpc,state,retry,hex,mod}
- Add cmd_whoami, cmd_health, cmd_check_key, cmd_ping subcommands
- Add cmd_register_user, cmd_login (OPAQUE), cmd_refresh_keypackage
- Hybrid PQ envelope (X25519 + ML-KEM-768) on all send/recv paths
- E2E test suite expanded
Other:
- quicnprotochat-gui: Tauri 2 desktop GUI skeleton (backend + HTML UI)
- quicnprotochat-p2p: iroh-based P2P transport stub
- quicnprotochat-core: app_message, hybrid_crypto modules; GroupMember API updates
- .github/workflows/size-lint.yml: binary size regression check
- docs: protocol comparison, roadmap updates, fully-operational checklist
This commit is contained in:
@@ -54,3 +54,5 @@ dashmap = { workspace = true }
|
||||
assert_cmd = "2"
|
||||
tempfile = "3"
|
||||
portpicker = "0.1"
|
||||
rand = "0.8"
|
||||
hex = "0.4"
|
||||
|
||||
1112
crates/quicnprotochat-client/src/client/commands.rs
Normal file
1112
crates/quicnprotochat-client/src/client/commands.rs
Normal file
File diff suppressed because it is too large
Load Diff
13
crates/quicnprotochat-client/src/client/hex.rs
Normal file
13
crates/quicnprotochat-client/src/client/hex.rs
Normal file
@@ -0,0 +1,13 @@
|
||||
/// Encode arbitrary bytes as a lowercase hex string.
pub fn encode(bytes: impl AsRef<[u8]>) -> String {
    let raw = bytes.as_ref();
    // Exactly two hex digits per input byte.
    let mut out = String::with_capacity(raw.len() * 2);
    for byte in raw {
        out.push_str(&format!("{byte:02x}"));
    }
    out
}
|
||||
|
||||
/// Decode a hex string (upper- or lowercase) into bytes.
///
/// Returns `Err("odd-length hex string")` for odd lengths and
/// `Err("invalid hex character")` for any non-hex character.
///
/// Operates on raw bytes rather than `&str` slicing: the previous
/// implementation sliced `&s[i..i + 2]`, which panics when the slice
/// boundary falls inside a multibyte UTF-8 character, and
/// `u8::from_str_radix` also silently accepted a leading `+` sign.
pub fn decode(s: &str) -> Result<Vec<u8>, &'static str> {
    // Convert one ASCII hex digit to its 4-bit value.
    fn nibble(c: u8) -> Result<u8, &'static str> {
        match c {
            b'0'..=b'9' => Ok(c - b'0'),
            b'a'..=b'f' => Ok(c - b'a' + 10),
            b'A'..=b'F' => Ok(c - b'A' + 10),
            _ => Err("invalid hex character"),
        }
    }

    let bytes = s.as_bytes();
    if bytes.len() % 2 != 0 {
        return Err("odd-length hex string");
    }
    bytes
        .chunks_exact(2)
        .map(|pair| Ok(nibble(pair[0])? << 4 | nibble(pair[1])?))
        .collect()
}
|
||||
9
crates/quicnprotochat-client/src/client/mod.rs
Normal file
9
crates/quicnprotochat-client/src/client/mod.rs
Normal file
@@ -0,0 +1,9 @@
|
||||
pub mod commands;
|
||||
pub mod hex;
|
||||
pub mod retry;
|
||||
pub mod rpc;
|
||||
pub mod state;
|
||||
|
||||
pub use commands::*;
|
||||
pub use rpc::{connect_node, enqueue, fetch_all, fetch_hybrid_key, fetch_key_package, fetch_wait, upload_hybrid_key, upload_key_package};
|
||||
pub use state::{decode_identity_key, load_existing_state, load_or_init_state, save_state};
|
||||
81
crates/quicnprotochat-client/src/client/retry.rs
Normal file
81
crates/quicnprotochat-client/src/client/retry.rs
Normal file
@@ -0,0 +1,81 @@
|
||||
//! Retry with exponential backoff for transient RPC failures.
|
||||
|
||||
use std::future::Future;
|
||||
use std::time::Duration;
|
||||
|
||||
use rand::Rng;
|
||||
use tracing::warn;
|
||||
|
||||
/// Default maximum number of retry attempts (including the first try).
pub const DEFAULT_MAX_RETRIES: u32 = 3;
/// Default base delay in milliseconds for exponential backoff.
/// The delay doubles on each subsequent attempt, plus random jitter.
pub const DEFAULT_BASE_DELAY_MS: u64 = 500;
|
||||
|
||||
/// Runs an async operation with retries. On `Ok(t)` returns immediately.
|
||||
/// On `Err(e)`: if `is_retriable(&e)` and `attempt < max_retries`, sleeps with
|
||||
/// exponential backoff (plus jitter) then retries; otherwise returns the last error.
|
||||
pub async fn retry_async<F, Fut, T, E, P>(
|
||||
op: F,
|
||||
max_retries: u32,
|
||||
base_delay_ms: u64,
|
||||
is_retriable: P,
|
||||
) -> Result<T, E>
|
||||
where
|
||||
F: Fn() -> Fut,
|
||||
Fut: Future<Output = Result<T, E>>,
|
||||
P: Fn(&E) -> bool,
|
||||
{
|
||||
let mut last_err = None;
|
||||
for attempt in 0..max_retries {
|
||||
match op().await {
|
||||
Ok(t) => return Ok(t),
|
||||
Err(e) => {
|
||||
last_err = Some(e);
|
||||
let err = last_err.as_ref().unwrap();
|
||||
if !is_retriable(err) || attempt + 1 >= max_retries {
|
||||
break;
|
||||
}
|
||||
let delay_ms = base_delay_ms * 2u64.saturating_pow(attempt);
|
||||
let jitter_ms = rand::thread_rng().gen_range(0..=delay_ms / 2);
|
||||
let total_ms = delay_ms + jitter_ms;
|
||||
warn!(
|
||||
attempt = attempt + 1,
|
||||
max_retries,
|
||||
delay_ms = total_ms,
|
||||
"RPC failed, retrying after backoff"
|
||||
);
|
||||
tokio::time::sleep(Duration::from_millis(total_ms)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(last_err.expect("retry_async: last_err set when we break after Err"))
|
||||
}
|
||||
|
||||
/// Classifies `anyhow::Error` for retry: returns `false` for auth or invalid-param
|
||||
/// errors (do not retry), `true` for transient errors (network, timeout, server 5xx).
|
||||
/// When in doubt, returns `true` (retry).
|
||||
pub fn anyhow_is_retriable(err: &anyhow::Error) -> bool {
|
||||
let s = format!("{:#}", err);
|
||||
let s_lower = s.to_lowercase();
|
||||
// Do not retry: auth / permission
|
||||
if s_lower.contains("unauthorized")
|
||||
|| s_lower.contains("auth failed")
|
||||
|| s_lower.contains("access denied")
|
||||
|| s_lower.contains("401")
|
||||
|| s_lower.contains("forbidden")
|
||||
|| s_lower.contains("403")
|
||||
|| s_lower.contains("token")
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// Do not retry: bad request / invalid params
|
||||
if s_lower.contains("bad request")
|
||||
|| s_lower.contains("400")
|
||||
|| s_lower.contains("invalid param")
|
||||
|| s_lower.contains("fingerprint mismatch")
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// Retry: network, timeout, connection, server error, or anything else
|
||||
true
|
||||
}
|
||||
367
crates/quicnprotochat-client/src/client/rpc.rs
Normal file
367
crates/quicnprotochat-client/src/client/rpc.rs
Normal file
@@ -0,0 +1,367 @@
|
||||
use std::net::SocketAddr;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use quinn::{ClientConfig, Endpoint};
|
||||
use quinn_proto::crypto::rustls::QuicClientConfig;
|
||||
use rustls::pki_types::CertificateDer;
|
||||
use rustls::{ClientConfig as RustlsClientConfig, RootCertStore};
|
||||
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
||||
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
|
||||
|
||||
use quicnprotochat_core::HybridPublicKey;
|
||||
use quicnprotochat_proto::node_capnp::{auth, node_service};
|
||||
|
||||
use crate::AUTH_CONTEXT;
|
||||
|
||||
use super::retry::{anyhow_is_retriable, retry_async, DEFAULT_BASE_DELAY_MS, DEFAULT_MAX_RETRIES};
|
||||
|
||||
/// Establish a QUIC/TLS connection and return a `NodeService` client.
///
/// Must be called from within a `LocalSet` because capnp-rpc is `!Send`.
///
/// * `server` — target address in `host:port` form.
/// * `ca_cert` — path to a DER-encoded CA certificate; it becomes the ONLY
///   trusted root (no system trust store is consulted).
/// * `server_name` — TLS server name used for certificate verification.
pub async fn connect_node(
    server: &str,
    ca_cert: &Path,
    server_name: &str,
) -> anyhow::Result<node_service::Client> {
    let addr: SocketAddr = server
        .parse()
        .with_context(|| format!("server must be host:port, got {server}"))?;

    // Pin trust to exactly the provided CA certificate.
    let cert_bytes = std::fs::read(ca_cert).with_context(|| format!("read ca_cert {ca_cert:?}"))?;
    let mut roots = RootCertStore::empty();
    roots
        .add(CertificateDer::from(cert_bytes))
        .context("add root cert")?;

    let mut tls = RustlsClientConfig::builder()
        .with_root_certificates(roots)
        .with_no_client_auth();
    // ALPN must match what the server advertises for the capnp RPC protocol.
    tls.alpn_protocols = vec![b"capnp".to_vec()];

    let crypto = QuicClientConfig::try_from(tls)
        .map_err(|e| anyhow::anyhow!("invalid client TLS config: {e}"))?;

    // Bind an ephemeral UDP port for the client-side QUIC endpoint.
    let bind_addr: SocketAddr = "0.0.0.0:0".parse().context("parse client bind address")?;
    let mut endpoint = Endpoint::client(bind_addr)?;
    endpoint.set_default_client_config(ClientConfig::new(Arc::new(crypto)));

    let connection = endpoint
        .connect(addr, server_name)
        .context("quic connect init")?
        .await
        .context("quic connect failed")?;

    // A single bidirectional stream carries the whole Cap'n Proto RPC session.
    let (send, recv) = connection.open_bi().await.context("open bi stream")?;

    let network = twoparty::VatNetwork::new(
        recv.compat(),
        send.compat_write(),
        Side::Client,
        Default::default(),
    );

    let mut rpc_system = RpcSystem::new(Box::new(network), None);
    let client: node_service::Client = rpc_system.bootstrap(Side::Server);

    // Drive the RPC state machine on the current LocalSet (capnp-rpc futures
    // are !Send, so a plain tokio::spawn would not compile).
    tokio::task::spawn_local(rpc_system);

    Ok(client)
}
|
||||
|
||||
pub fn set_auth(auth: &mut auth::Builder<'_>) -> anyhow::Result<()> {
|
||||
let ctx = AUTH_CONTEXT.get().ok_or_else(|| {
|
||||
anyhow::anyhow!("init_auth must be called with a non-empty token before RPCs")
|
||||
})?;
|
||||
auth.set_version(ctx.version);
|
||||
auth.set_access_token(&ctx.access_token);
|
||||
auth.set_device_id(&ctx.device_id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Upload a KeyPackage and verify the fingerprint echoed by the AS.
|
||||
pub async fn upload_key_package(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
package: &[u8],
|
||||
) -> anyhow::Result<()> {
|
||||
let mut req = client.upload_key_package_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
p.set_package(package);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("upload_key_package RPC failed")?;
|
||||
|
||||
let server_fp = resp
|
||||
.get()
|
||||
.context("upload_key_package: bad response")?
|
||||
.get_fingerprint()
|
||||
.context("upload_key_package: missing fingerprint")?
|
||||
.to_vec();
|
||||
|
||||
let local_fp = super::state::sha256(package);
|
||||
anyhow::ensure!(server_fp == local_fp, "fingerprint mismatch");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Fetch a KeyPackage for `identity_key` from the AS.
|
||||
pub async fn fetch_key_package(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
let mut req = client.fetch_key_package_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("fetch_key_package RPC failed")?;
|
||||
|
||||
let pkg = resp
|
||||
.get()
|
||||
.context("fetch_key_package: bad response")?
|
||||
.get_package()
|
||||
.context("fetch_key_package: missing package field")?
|
||||
.to_vec();
|
||||
|
||||
Ok(pkg)
|
||||
}
|
||||
|
||||
/// Enqueue an opaque payload to the DS for `recipient_key`.
/// Returns the per-inbox sequence number assigned by the server.
/// Retries on transient failures with exponential backoff.
pub async fn enqueue(
    client: &node_service::Client,
    recipient_key: &[u8],
    payload: &[u8],
) -> anyhow::Result<u64> {
    // Own the inputs so the retry closure can be invoked repeatedly (Fn, not FnOnce).
    let client = client.clone();
    let recipient_key = recipient_key.to_vec();
    let payload = payload.to_vec();
    retry_async(
        || {
            // Fresh clones per attempt: each capnp request is single-use.
            let client = client.clone();
            let recipient_key = recipient_key.clone();
            let payload = payload.clone();
            async move {
                let mut req = client.enqueue_request();
                {
                    // Builder borrows must end before `req.send()`.
                    let mut p = req.get();
                    p.set_recipient_key(&recipient_key);
                    p.set_payload(&payload);
                    p.set_channel_id(&[]);
                    p.set_version(1);
                    let mut auth = p.reborrow().init_auth();
                    set_auth(&mut auth)?;
                }
                let resp = req.send().promise.await.context("enqueue RPC failed")?;
                let seq = resp.get().context("enqueue: bad response")?.get_seq();
                Ok(seq)
            }
        },
        DEFAULT_MAX_RETRIES,
        DEFAULT_BASE_DELAY_MS,
        anyhow_is_retriable,
    )
    .await
}
|
||||
|
||||
/// Fetch and drain all payloads for `recipient_key`.
|
||||
/// Returns `(seq, payload)` pairs — sort by `seq` before MLS processing.
|
||||
/// Retries on transient failures with exponential backoff.
|
||||
pub async fn fetch_all(
|
||||
client: &node_service::Client,
|
||||
recipient_key: &[u8],
|
||||
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
|
||||
let client = client.clone();
|
||||
let recipient_key = recipient_key.to_vec();
|
||||
retry_async(
|
||||
|| {
|
||||
let client = client.clone();
|
||||
let recipient_key = recipient_key.clone();
|
||||
async move {
|
||||
let mut req = client.fetch_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_recipient_key(&recipient_key);
|
||||
p.set_channel_id(&[]);
|
||||
p.set_version(1);
|
||||
p.set_limit(0); // fetch all
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req.send().promise.await.context("fetch RPC failed")?;
|
||||
|
||||
let list = resp
|
||||
.get()
|
||||
.context("fetch: bad response")?
|
||||
.get_payloads()
|
||||
.context("fetch: missing payloads")?;
|
||||
|
||||
let mut payloads = Vec::with_capacity(list.len() as usize);
|
||||
for i in 0..list.len() {
|
||||
let entry = list.get(i);
|
||||
let seq = entry.get_seq();
|
||||
let data = entry
|
||||
.get_data()
|
||||
.context("fetch: envelope data read failed")?
|
||||
.to_vec();
|
||||
payloads.push((seq, data));
|
||||
}
|
||||
|
||||
Ok(payloads)
|
||||
}
|
||||
},
|
||||
DEFAULT_MAX_RETRIES,
|
||||
DEFAULT_BASE_DELAY_MS,
|
||||
anyhow_is_retriable,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Long-poll for payloads with optional timeout (ms).
|
||||
/// Returns `(seq, payload)` pairs — sort by `seq` before MLS processing.
|
||||
/// Retries on transient failures with exponential backoff.
|
||||
pub async fn fetch_wait(
|
||||
client: &node_service::Client,
|
||||
recipient_key: &[u8],
|
||||
timeout_ms: u64,
|
||||
) -> anyhow::Result<Vec<(u64, Vec<u8>)>> {
|
||||
let client = client.clone();
|
||||
let recipient_key = recipient_key.to_vec();
|
||||
retry_async(
|
||||
|| {
|
||||
let client = client.clone();
|
||||
let recipient_key = recipient_key.clone();
|
||||
let timeout_ms = timeout_ms;
|
||||
async move {
|
||||
let mut req = client.fetch_wait_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_recipient_key(&recipient_key);
|
||||
p.set_timeout_ms(timeout_ms);
|
||||
p.set_channel_id(&[]);
|
||||
p.set_version(1);
|
||||
p.set_limit(0); // fetch all
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req.send().promise.await.context("fetch_wait RPC failed")?;
|
||||
|
||||
let list = resp
|
||||
.get()
|
||||
.context("fetch_wait: bad response")?
|
||||
.get_payloads()
|
||||
.context("fetch_wait: missing payloads")?;
|
||||
|
||||
let mut payloads = Vec::with_capacity(list.len() as usize);
|
||||
for i in 0..list.len() {
|
||||
let entry = list.get(i);
|
||||
let seq = entry.get_seq();
|
||||
let data = entry
|
||||
.get_data()
|
||||
.context("fetch_wait: envelope data read failed")?
|
||||
.to_vec();
|
||||
payloads.push((seq, data));
|
||||
}
|
||||
|
||||
Ok(payloads)
|
||||
}
|
||||
},
|
||||
DEFAULT_MAX_RETRIES,
|
||||
DEFAULT_BASE_DELAY_MS,
|
||||
anyhow_is_retriable,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Upload a hybrid (X25519 + ML-KEM-768) public key for an identity.
|
||||
pub async fn upload_hybrid_key(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
hybrid_pk: &HybridPublicKey,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut req = client.upload_hybrid_key_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
p.set_hybrid_public_key(&hybrid_pk.to_bytes());
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
req.send()
|
||||
.promise
|
||||
.await
|
||||
.context("upload_hybrid_key RPC failed")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Fetch a peer's hybrid public key from the server.
|
||||
///
|
||||
/// Returns `None` if the peer has not uploaded a hybrid key.
|
||||
pub async fn fetch_hybrid_key(
|
||||
client: &node_service::Client,
|
||||
identity_key: &[u8],
|
||||
) -> anyhow::Result<Option<HybridPublicKey>> {
|
||||
let mut req = client.fetch_hybrid_key_request();
|
||||
{
|
||||
let mut p = req.get();
|
||||
p.set_identity_key(identity_key);
|
||||
let mut auth = p.reborrow().init_auth();
|
||||
set_auth(&mut auth)?;
|
||||
}
|
||||
|
||||
let resp = req
|
||||
.send()
|
||||
.promise
|
||||
.await
|
||||
.context("fetch_hybrid_key RPC failed")?;
|
||||
|
||||
let pk_bytes = resp
|
||||
.get()
|
||||
.context("fetch_hybrid_key: bad response")?
|
||||
.get_hybrid_public_key()
|
||||
.context("fetch_hybrid_key: missing field")?
|
||||
.to_vec();
|
||||
|
||||
if pk_bytes.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let pk = HybridPublicKey::from_bytes(&pk_bytes).context("invalid hybrid public key")?;
|
||||
Ok(Some(pk))
|
||||
}
|
||||
|
||||
/// Decrypt a hybrid envelope. Requires a hybrid key; no fallback to plaintext MLS.
|
||||
pub fn try_hybrid_decrypt(
|
||||
hybrid_kp: Option<&quicnprotochat_core::HybridKeypair>,
|
||||
payload: &[u8],
|
||||
) -> anyhow::Result<Vec<u8>> {
|
||||
let kp = hybrid_kp.ok_or_else(|| anyhow::anyhow!("hybrid key required for decryption"))?;
|
||||
quicnprotochat_core::hybrid_decrypt(kp, payload).map_err(|e| anyhow::anyhow!("{e}"))
|
||||
}
|
||||
|
||||
/// Return the current Unix timestamp in milliseconds.
///
/// A clock set before the Unix epoch (should never happen in practice)
/// yields 0 instead of panicking.
pub fn current_timestamp_ms() -> u64 {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|elapsed| elapsed.as_millis() as u64)
        .unwrap_or(0)
}
|
||||
225
crates/quicnprotochat-client/src/client/state.rs
Normal file
225
crates/quicnprotochat-client/src/client/state.rs
Normal file
@@ -0,0 +1,225 @@
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use argon2::Argon2;
|
||||
use chacha20poly1305::{
|
||||
aead::{Aead, KeyInit},
|
||||
ChaCha20Poly1305, Key, Nonce,
|
||||
};
|
||||
use rand::RngCore;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use quicnprotochat_core::{DiskKeyStore, GroupMember, HybridKeypair, HybridKeypairBytes, IdentityKeypair};
|
||||
|
||||
/// Magic bytes for encrypted client state files.
const STATE_MAGIC: &[u8; 4] = b"QPCE";
/// Length in bytes of the Argon2 salt stored directly after the magic.
const STATE_SALT_LEN: usize = 16;
/// Length in bytes of the ChaCha20-Poly1305 nonce stored after the salt.
const STATE_NONCE_LEN: usize = 12;
|
||||
|
||||
/// On-disk client state, serialized with bincode and optionally wrapped in the
/// QPCE encryption format (see `encrypt_state`).
#[derive(Serialize, Deserialize)]
pub struct StoredState {
    /// Seed from which the identity keypair is re-derived on load.
    pub identity_seed: [u8; 32],
    /// Serialized MLS group state; `None` until a group is created or joined.
    pub group: Option<Vec<u8>>,
    /// Post-quantum hybrid keypair (X25519 + ML-KEM-768). `None` for state created before hybrid was added.
    #[serde(default)]
    pub hybrid_key: Option<HybridKeypairBytes>,
    /// Cached member public keys for group participants.
    #[serde(default)]
    pub member_keys: Vec<Vec<u8>>,
}
|
||||
|
||||
impl StoredState {
    /// Rehydrate runtime objects from persisted state: rebuild the identity
    /// keypair from its seed, decode the optional MLS group, open the on-disk
    /// key store next to `state_path`, and decode the optional hybrid keypair.
    pub fn into_parts(self, state_path: &Path) -> anyhow::Result<(GroupMember, Option<HybridKeypair>)> {
        let identity = Arc::new(IdentityKeypair::from_seed(self.identity_seed));
        let group = self
            .group
            .map(|bytes| bincode::deserialize(&bytes).context("decode group"))
            .transpose()?;
        let key_store = DiskKeyStore::persistent(keystore_path(state_path))?;
        let member = GroupMember::new_with_state(identity, key_store, group);

        let hybrid_kp = self
            .hybrid_key
            .map(|bytes| HybridKeypair::from_bytes(&bytes).context("decode hybrid key"))
            .transpose()?;

        Ok((member, hybrid_kp))
    }

    /// Snapshot runtime objects into a serializable state.
    ///
    /// NOTE(review): `member_keys` is always reset to empty here, so the cached
    /// member-key list is not persisted via this path — confirm that is intended.
    pub fn from_parts(member: &GroupMember, hybrid_kp: Option<&HybridKeypair>) -> anyhow::Result<Self> {
        let group = member
            .group_ref()
            .map(|g| bincode::serialize(g).context("serialize group"))
            .transpose()?;

        Ok(Self {
            identity_seed: member.identity_seed(),
            group,
            hybrid_key: hybrid_kp.map(|kp| kp.to_bytes()),
            member_keys: Vec::new(),
        })
    }
}
|
||||
|
||||
/// Derive a 32-byte key from a password and salt using Argon2id.
|
||||
fn derive_state_key(password: &str, salt: &[u8]) -> anyhow::Result<[u8; 32]> {
|
||||
let mut key = [0u8; 32];
|
||||
Argon2::default()
|
||||
.hash_password_into(password.as_bytes(), salt, &mut key)
|
||||
.map_err(|e| anyhow::anyhow!("argon2 key derivation failed: {e}"))?;
|
||||
Ok(key)
|
||||
}
|
||||
|
||||
/// Encrypt `plaintext` with the QPCE format: magic(4) | salt(16) | nonce(12) | ciphertext.
|
||||
pub fn encrypt_state(password: &str, plaintext: &[u8]) -> anyhow::Result<Vec<u8>> {
|
||||
let mut salt = [0u8; STATE_SALT_LEN];
|
||||
rand::rngs::OsRng.fill_bytes(&mut salt);
|
||||
|
||||
let mut nonce_bytes = [0u8; STATE_NONCE_LEN];
|
||||
rand::rngs::OsRng.fill_bytes(&mut nonce_bytes);
|
||||
|
||||
let key = derive_state_key(password, &salt)?;
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&key));
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
|
||||
let ciphertext = cipher
|
||||
.encrypt(nonce, plaintext)
|
||||
.map_err(|e| anyhow::anyhow!("state encryption failed: {e}"))?;
|
||||
|
||||
let mut out = Vec::with_capacity(4 + STATE_SALT_LEN + STATE_NONCE_LEN + ciphertext.len());
|
||||
out.extend_from_slice(STATE_MAGIC);
|
||||
out.extend_from_slice(&salt);
|
||||
out.extend_from_slice(&nonce_bytes);
|
||||
out.extend_from_slice(&ciphertext);
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
/// Decrypt a QPCE-formatted state file.
|
||||
pub fn decrypt_state(password: &str, data: &[u8]) -> anyhow::Result<Vec<u8>> {
|
||||
let header_len = 4 + STATE_SALT_LEN + STATE_NONCE_LEN;
|
||||
anyhow::ensure!(
|
||||
data.len() > header_len,
|
||||
"encrypted state file too short ({} bytes)",
|
||||
data.len()
|
||||
);
|
||||
|
||||
let salt = &data[4..4 + STATE_SALT_LEN];
|
||||
let nonce_bytes = &data[4 + STATE_SALT_LEN..header_len];
|
||||
let ciphertext = &data[header_len..];
|
||||
|
||||
let key = derive_state_key(password, salt)?;
|
||||
let cipher = ChaCha20Poly1305::new(Key::from_slice(&key));
|
||||
let nonce = Nonce::from_slice(nonce_bytes);
|
||||
|
||||
let plaintext = cipher
|
||||
.decrypt(nonce, ciphertext)
|
||||
.map_err(|_| anyhow::anyhow!("state decryption failed (wrong password?)"))?;
|
||||
|
||||
Ok(plaintext)
|
||||
}
|
||||
|
||||
/// Returns true if raw bytes begin with the QPCE magic header.
|
||||
pub fn is_encrypted_state(bytes: &[u8]) -> bool {
|
||||
bytes.len() >= 4 && &bytes[..4] == STATE_MAGIC
|
||||
}
|
||||
|
||||
/// Load the state file at `path`, creating a brand-new identity (plus hybrid
/// keypair and on-disk key store) when the file does not exist yet.
///
/// Existing state that predates hybrid keys is upgraded in place: a hybrid
/// keypair is generated and the file is rewritten immediately.
pub fn load_or_init_state(path: &Path, password: Option<&str>) -> anyhow::Result<StoredState> {
    if path.exists() {
        let mut state = load_existing_state(path, password)?;
        // Generate hybrid keypair if missing (upgrade from older state).
        if state.hybrid_key.is_none() {
            state.hybrid_key = Some(HybridKeypair::generate().to_bytes());
            write_state(path, &state, password)?;
        }
        return Ok(state);
    }

    // First run: mint a fresh identity + hybrid keypair and persist them.
    let identity = IdentityKeypair::generate();
    let hybrid_kp = HybridKeypair::generate();
    let key_store = DiskKeyStore::persistent(keystore_path(path))?;
    let member = GroupMember::new_with_state(Arc::new(identity), key_store, None);
    let state = StoredState::from_parts(&member, Some(&hybrid_kp))?;
    write_state(path, &state, password)?;
    Ok(state)
}
|
||||
|
||||
pub fn load_existing_state(path: &Path, password: Option<&str>) -> anyhow::Result<StoredState> {
|
||||
let bytes = std::fs::read(path).with_context(|| format!("read state file {path:?}"))?;
|
||||
|
||||
if is_encrypted_state(&bytes) {
|
||||
let pw = password
|
||||
.context("state file is encrypted (QPCE); a password is required to decrypt it")?;
|
||||
let plaintext = decrypt_state(pw, &bytes)?;
|
||||
bincode::deserialize(&plaintext).context("decode encrypted state")
|
||||
} else {
|
||||
bincode::deserialize(&bytes).context("decode state")
|
||||
}
|
||||
}
|
||||
|
||||
pub fn save_state(
|
||||
path: &Path,
|
||||
member: &GroupMember,
|
||||
hybrid_kp: Option<&HybridKeypair>,
|
||||
password: Option<&str>,
|
||||
) -> anyhow::Result<()> {
|
||||
let state = StoredState::from_parts(member, hybrid_kp)?;
|
||||
write_state(path, &state, password)
|
||||
}
|
||||
|
||||
pub fn write_state(path: &Path, state: &StoredState, password: Option<&str>) -> anyhow::Result<()> {
|
||||
if let Some(parent) = path.parent() {
|
||||
std::fs::create_dir_all(parent).with_context(|| format!("create dir {parent:?}"))?;
|
||||
}
|
||||
let plaintext = bincode::serialize(state).context("encode state")?;
|
||||
|
||||
let bytes = if let Some(pw) = password {
|
||||
encrypt_state(pw, &plaintext)?
|
||||
} else {
|
||||
plaintext
|
||||
};
|
||||
|
||||
std::fs::write(path, bytes).with_context(|| format!("write state {path:?}"))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn decode_identity_key(hex_str: &str) -> anyhow::Result<Vec<u8>> {
|
||||
let bytes = super::hex::decode(hex_str)
|
||||
.map_err(|e| anyhow::anyhow!(e))
|
||||
.context("identity key must be hex")?;
|
||||
anyhow::ensure!(bytes.len() == 32, "identity key must be 32 bytes");
|
||||
Ok(bytes)
|
||||
}
|
||||
|
||||
/// Sibling path of the state file with a `.ks` extension
/// (e.g. `state.bin` -> `state.ks`).
pub fn keystore_path(state_path: &Path) -> PathBuf {
    state_path.with_extension("ks")
}
|
||||
|
||||
pub fn sha256(bytes: &[u8]) -> Vec<u8> {
|
||||
use sha2::{Digest, Sha256};
|
||||
Sha256::digest(bytes).to_vec()
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Round-trip: encrypting then decrypting with the same password recovers
    // the plaintext, and the ciphertext carries the QPCE magic header.
    #[test]
    fn encrypt_decrypt_roundtrip() {
        let plaintext = b"test state data";
        let password = "test-password";
        let encrypted = encrypt_state(password, plaintext).unwrap();
        assert!(is_encrypted_state(&encrypted));
        let decrypted = decrypt_state(password, &encrypted).unwrap();
        assert_eq!(decrypted, plaintext);
    }

    // AEAD authentication must reject decryption under a wrong password.
    #[test]
    fn wrong_password_fails() {
        let plaintext = b"test state data";
        let encrypted = encrypt_state("correct", plaintext).unwrap();
        assert!(decrypt_state("wrong", &encrypted).is_err());
    }
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -5,9 +5,9 @@ use std::path::PathBuf;
|
||||
use clap::{Parser, Subcommand};
|
||||
|
||||
use quicnprotochat_client::{
|
||||
cmd_check_key, cmd_create_group, cmd_demo_group, cmd_fetch_key, cmd_health, cmd_invite,
|
||||
cmd_join, cmd_login, cmd_ping, cmd_recv, cmd_register, cmd_register_state, cmd_register_user,
|
||||
cmd_send, cmd_whoami, init_auth, ClientAuth,
|
||||
cmd_chat, cmd_check_key, cmd_create_group, cmd_demo_group, cmd_fetch_key, cmd_health,
|
||||
cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_recv, cmd_register, cmd_register_state,
|
||||
cmd_refresh_keypackage, cmd_register_user, cmd_send, cmd_whoami, init_auth, ClientAuth,
|
||||
};
|
||||
|
||||
// ── CLI ───────────────────────────────────────────────────────────────────────
|
||||
@@ -78,6 +78,15 @@ enum Command {
|
||||
username: String,
|
||||
#[arg(long)]
|
||||
password: String,
|
||||
/// Hex-encoded Ed25519 identity key (64 hex chars). Optional if --state is provided.
|
||||
#[arg(long)]
|
||||
identity_key: Option<String>,
|
||||
/// State file to derive the identity key (requires same password if encrypted).
|
||||
#[arg(long)]
|
||||
state: Option<PathBuf>,
|
||||
/// Password for the encrypted state file (if any).
|
||||
#[arg(long)]
|
||||
state_password: Option<String>,
|
||||
},
|
||||
|
||||
/// Show local identity key, fingerprint, group status, and hybrid key status.
|
||||
@@ -132,7 +141,7 @@ enum Command {
|
||||
identity_key: String,
|
||||
},
|
||||
|
||||
/// Run a full Alice/Bob MLS round-trip against live AS and DS endpoints.
|
||||
/// Run a two-party MLS demo (creator + joiner) against live AS and DS.
|
||||
DemoGroup {
|
||||
/// Server address (host:port).
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
@@ -154,6 +163,22 @@ enum Command {
|
||||
server: String,
|
||||
},
|
||||
|
||||
/// Refresh the KeyPackage on the server (existing state only).
|
||||
/// Run periodically (e.g. before server TTL ~24h) or after your KeyPackage was consumed so others can invite you.
|
||||
RefreshKeypackage {
|
||||
/// State file path (identity + MLS state).
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "quicnprotochat-state.bin",
|
||||
env = "QUICNPROTOCHAT_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
|
||||
/// Server address (host:port).
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
},
|
||||
|
||||
/// Create a persistent group and save state to disk.
|
||||
CreateGroup {
|
||||
/// State file path (identity + MLS state).
|
||||
@@ -210,9 +235,12 @@ enum Command {
|
||||
state: PathBuf,
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
/// Recipient identity key (hex, 32 bytes -> 64 chars).
|
||||
/// Recipient identity key (hex, 32 bytes -> 64 chars). Omit when using --all.
|
||||
#[arg(long)]
|
||||
peer_key: String,
|
||||
peer_key: Option<String>,
|
||||
/// Send to all other group members (N-way groups).
|
||||
#[arg(long)]
|
||||
all: bool,
|
||||
/// Plaintext message to send.
|
||||
#[arg(long)]
|
||||
msg: String,
|
||||
@@ -237,6 +265,25 @@ enum Command {
|
||||
#[arg(long)]
|
||||
stream: bool,
|
||||
},
|
||||
|
||||
/// Interactive 1:1 chat: type to send, incoming messages printed as [peer] <msg>. Ctrl+D to exit.
|
||||
/// In a two-person group, peer is chosen automatically; use --peer-key only with 3+ members.
|
||||
Chat {
|
||||
#[arg(
|
||||
long,
|
||||
default_value = "quicnprotochat-state.bin",
|
||||
env = "QUICNPROTOCHAT_STATE"
|
||||
)]
|
||||
state: PathBuf,
|
||||
#[arg(long, default_value = "127.0.0.1:7000", env = "QUICNPROTOCHAT_SERVER")]
|
||||
server: String,
|
||||
/// Peer identity key (hex, 64 chars). Omit in a two-person group to use the only other member.
|
||||
#[arg(long)]
|
||||
peer_key: Option<String>,
|
||||
/// How often to poll for incoming messages (milliseconds).
|
||||
#[arg(long, default_value_t = 500)]
|
||||
poll_interval_ms: u64,
|
||||
},
|
||||
}
|
||||
|
||||
// ── Entry point ───────────────────────────────────────────────────────────────
|
||||
@@ -272,6 +319,7 @@ async fn main() -> anyhow::Result<()> {
|
||||
&args.server_name,
|
||||
&username,
|
||||
&password,
|
||||
None,
|
||||
))
|
||||
.await
|
||||
}
|
||||
@@ -279,6 +327,9 @@ async fn main() -> anyhow::Result<()> {
|
||||
server,
|
||||
username,
|
||||
password,
|
||||
identity_key,
|
||||
state,
|
||||
state_password,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
@@ -288,6 +339,9 @@ async fn main() -> anyhow::Result<()> {
|
||||
&args.server_name,
|
||||
&username,
|
||||
&password,
|
||||
identity_key.as_deref(),
|
||||
state.as_deref(),
|
||||
state_password.as_deref(),
|
||||
))
|
||||
.await
|
||||
}
|
||||
@@ -351,6 +405,18 @@ async fn main() -> anyhow::Result<()> {
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::RefreshKeypackage { state, server } => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_refresh_keypackage(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
state_pw,
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::CreateGroup {
|
||||
state,
|
||||
server,
|
||||
@@ -394,6 +460,7 @@ async fn main() -> anyhow::Result<()> {
|
||||
state,
|
||||
server,
|
||||
peer_key,
|
||||
all,
|
||||
msg,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
@@ -403,7 +470,8 @@ async fn main() -> anyhow::Result<()> {
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
&peer_key,
|
||||
peer_key.as_deref(),
|
||||
all,
|
||||
&msg,
|
||||
state_pw,
|
||||
))
|
||||
@@ -428,5 +496,24 @@ async fn main() -> anyhow::Result<()> {
|
||||
))
|
||||
.await
|
||||
}
|
||||
Command::Chat {
|
||||
state,
|
||||
server,
|
||||
peer_key,
|
||||
poll_interval_ms,
|
||||
} => {
|
||||
let local = tokio::task::LocalSet::new();
|
||||
local
|
||||
.run_until(cmd_chat(
|
||||
&state,
|
||||
&server,
|
||||
&args.ca_cert,
|
||||
&args.server_name,
|
||||
peer_key.as_deref(),
|
||||
state_pw,
|
||||
poll_interval_ms,
|
||||
))
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,8 +5,10 @@ use std::{path::PathBuf, process::Command, time::Duration};
|
||||
|
||||
use assert_cmd::cargo::cargo_bin;
|
||||
use portpicker::pick_unused_port;
|
||||
use rand::RngCore;
|
||||
use tempfile::TempDir;
|
||||
use tokio::time::sleep;
|
||||
use hex;
|
||||
|
||||
// Required by rustls 0.23 when QUIC/TLS is used from this process (e.g. client in test).
|
||||
fn ensure_rustls_provider() {
|
||||
@@ -14,8 +16,9 @@ fn ensure_rustls_provider() {
|
||||
}
|
||||
|
||||
use quicnprotochat_client::{
|
||||
cmd_create_group, cmd_invite, cmd_join, cmd_ping, cmd_register_state, cmd_send, connect_node,
|
||||
fetch_wait, init_auth, ClientAuth,
|
||||
cmd_create_group, cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_register_state,
|
||||
cmd_register_user, cmd_send, connect_node, enqueue, fetch_wait, init_auth,
|
||||
receive_pending_plaintexts, ClientAuth,
|
||||
};
|
||||
use quicnprotochat_core::IdentityKeypair;
|
||||
|
||||
@@ -45,6 +48,8 @@ async fn wait_for_health(server: &str, ca_cert: &PathBuf, server_name: &str) ->
|
||||
anyhow::bail!("server health never became ready")
|
||||
}
|
||||
|
||||
/// Creator and joiner register; creator creates group and invites joiner; joiner joins;
|
||||
/// creator sends a message; assert joiner's mailbox receives it.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
|
||||
ensure_rustls_provider();
|
||||
@@ -72,6 +77,7 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
|
||||
.arg(&tls_key)
|
||||
.arg("--auth-token")
|
||||
.arg(auth_token)
|
||||
.arg("--allow-insecure-auth")
|
||||
.spawn()
|
||||
.expect("spawn server");
|
||||
|
||||
@@ -91,15 +97,14 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
|
||||
// Set client auth context.
|
||||
init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
|
||||
|
||||
// LocalSet for capnp !Send operations.
|
||||
let local = tokio::task::LocalSet::new();
|
||||
|
||||
let alice_state = base.join("alice.bin");
|
||||
let bob_state = base.join("bob.bin");
|
||||
let creator_state = base.join("creator.bin");
|
||||
let joiner_state = base.join("joiner.bin");
|
||||
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&alice_state,
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
@@ -109,7 +114,7 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
|
||||
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&bob_state,
|
||||
&joiner_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
@@ -118,52 +123,475 @@ async fn e2e_happy_path_register_invite_join_send_recv() -> anyhow::Result<()> {
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_create_group(&alice_state, &server, "test-group", None))
|
||||
.run_until(cmd_create_group(&creator_state, &server, "test-group", None))
|
||||
.await?;
|
||||
|
||||
// Load Bob identity key from persisted state to use as peer key.
|
||||
let bob_bytes = std::fs::read(&bob_state)?;
|
||||
let bob_state_compat: StoredStateCompat = bincode::deserialize(&bob_bytes)?;
|
||||
let bob_identity = IdentityKeypair::from_seed(bob_state_compat.identity_seed);
|
||||
let bob_pk_hex = hex_encode(&bob_identity.public_key_bytes());
|
||||
let joiner_bytes = std::fs::read(&joiner_state)?;
|
||||
let joiner_state_compat: StoredStateCompat = bincode::deserialize(&joiner_bytes)?;
|
||||
let joiner_identity = IdentityKeypair::from_seed(joiner_state_compat.identity_seed);
|
||||
let joiner_pk_hex = hex_encode(&joiner_identity.public_key_bytes());
|
||||
|
||||
local
|
||||
.run_until(cmd_invite(
|
||||
&alice_state,
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
&bob_pk_hex,
|
||||
&joiner_pk_hex,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_join(&bob_state, &server, &ca_cert, "localhost", None))
|
||||
.run_until(cmd_join(&joiner_state, &server, &ca_cert, "localhost", None))
|
||||
.await?;
|
||||
|
||||
// Send Alice -> Bob.
|
||||
local
|
||||
.run_until(cmd_send(
|
||||
&alice_state,
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
&bob_pk_hex,
|
||||
"hello bob",
|
||||
Some(&joiner_pk_hex),
|
||||
false,
|
||||
"hello",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
// Confirm Bob can fetch at least one payload.
|
||||
local
|
||||
.run_until(async {
|
||||
let client = connect_node(&server, &ca_cert, "localhost").await?;
|
||||
let payloads = fetch_wait(&client, &bob_identity.public_key_bytes(), 1000).await?;
|
||||
anyhow::ensure!(!payloads.is_empty(), "no payloads delivered to Bob");
|
||||
let payloads = fetch_wait(&client, &joiner_identity.public_key_bytes(), 1000).await?;
|
||||
anyhow::ensure!(!payloads.is_empty(), "no payloads delivered to joiner");
|
||||
Ok::<(), anyhow::Error>(())
|
||||
})
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Three-party group: A creates group, invites B then C; B and C join; A sends, B and C receive;
|
||||
/// B sends, A and C receive.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn e2e_three_party_group_invite_join_send_recv() -> anyhow::Result<()> {
|
||||
ensure_rustls_provider();
|
||||
|
||||
let temp = TempDir::new()?;
|
||||
let base = temp.path();
|
||||
let port = pick_unused_port().expect("free port");
|
||||
let listen = format!("127.0.0.1:{port}");
|
||||
let server = listen.clone();
|
||||
let ca_cert = base.join("server-cert.der");
|
||||
let tls_key = base.join("server-key.der");
|
||||
let data_dir = base.join("data");
|
||||
let auth_token = "devtoken";
|
||||
|
||||
let server_bin = cargo_bin("quicnprotochat-server");
|
||||
let child = Command::new(server_bin)
|
||||
.arg("--listen")
|
||||
.arg(&listen)
|
||||
.arg("--data-dir")
|
||||
.arg(&data_dir)
|
||||
.arg("--tls-cert")
|
||||
.arg(&ca_cert)
|
||||
.arg("--tls-key")
|
||||
.arg(&tls_key)
|
||||
.arg("--auth-token")
|
||||
.arg(auth_token)
|
||||
.arg("--allow-insecure-auth")
|
||||
.spawn()
|
||||
.expect("spawn server");
|
||||
|
||||
struct ChildGuard(std::process::Child);
|
||||
impl Drop for ChildGuard {
|
||||
fn drop(&mut self) {
|
||||
let _ = self.0.kill();
|
||||
}
|
||||
}
|
||||
let _child_guard = ChildGuard(child);
|
||||
|
||||
wait_for_health(&server, &ca_cert, "localhost").await?;
|
||||
init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
|
||||
|
||||
let local = tokio::task::LocalSet::new();
|
||||
|
||||
let creator_state = base.join("creator.bin");
|
||||
let b_state = base.join("b.bin");
|
||||
let c_state = base.join("c.bin");
|
||||
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&b_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&c_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
let b_bytes = std::fs::read(&b_state)?;
|
||||
let b_compat: StoredStateCompat = bincode::deserialize(&b_bytes)?;
|
||||
let b_pk_hex = hex_encode(&IdentityKeypair::from_seed(b_compat.identity_seed).public_key_bytes());
|
||||
|
||||
let c_bytes = std::fs::read(&c_state)?;
|
||||
let c_compat: StoredStateCompat = bincode::deserialize(&c_bytes)?;
|
||||
let c_pk_hex = hex_encode(&IdentityKeypair::from_seed(c_compat.identity_seed).public_key_bytes());
|
||||
|
||||
local
|
||||
.run_until(cmd_create_group(&creator_state, &server, "test-group", None))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_invite(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
&b_pk_hex,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_invite(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
&c_pk_hex,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_join(&b_state, &server, &ca_cert, "localhost", None))
|
||||
.await?;
|
||||
local
|
||||
.run_until(cmd_join(&c_state, &server, &ca_cert, "localhost", None))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_send(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
true,
|
||||
"hello",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
sleep(Duration::from_millis(150)).await;
|
||||
|
||||
let b_plaintexts = local
|
||||
.run_until(receive_pending_plaintexts(
|
||||
&b_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
1500,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
let c_plaintexts = local
|
||||
.run_until(receive_pending_plaintexts(
|
||||
&c_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
1500,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
anyhow::ensure!(
|
||||
b_plaintexts.iter().any(|p| p.as_slice() == b"hello"),
|
||||
"B did not receive 'hello', got {:?}",
|
||||
b_plaintexts
|
||||
);
|
||||
anyhow::ensure!(
|
||||
c_plaintexts.iter().any(|p| p.as_slice() == b"hello"),
|
||||
"C did not receive 'hello', got {:?}",
|
||||
c_plaintexts
|
||||
);
|
||||
|
||||
local
|
||||
.run_until(cmd_send(
|
||||
&b_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
true,
|
||||
"hi",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
sleep(Duration::from_millis(200)).await;
|
||||
|
||||
let a_plaintexts = local
|
||||
.run_until(receive_pending_plaintexts(
|
||||
&creator_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
1500,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
let c_plaintexts2 = local
|
||||
.run_until(receive_pending_plaintexts(
|
||||
&c_state,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
1500,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
anyhow::ensure!(
|
||||
a_plaintexts.iter().any(|p| p.as_slice() == b"hi"),
|
||||
"A did not receive 'hi', got {:?}",
|
||||
a_plaintexts
|
||||
);
|
||||
anyhow::ensure!(
|
||||
c_plaintexts2.iter().any(|p| p.as_slice() == b"hi"),
|
||||
"C did not receive 'hi', got {:?}",
|
||||
c_plaintexts2
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Login should refuse if the presented identity key does not match the registered key.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn e2e_login_rejects_mismatched_identity() -> anyhow::Result<()> {
|
||||
ensure_rustls_provider();
|
||||
|
||||
let temp = TempDir::new()?;
|
||||
let base = temp.path();
|
||||
let port = pick_unused_port().expect("free port");
|
||||
let listen = format!("127.0.0.1:{port}");
|
||||
let server = listen.clone();
|
||||
let ca_cert = base.join("server-cert.der");
|
||||
let tls_key = base.join("server-key.der");
|
||||
let data_dir = base.join("data");
|
||||
let auth_token = "devtoken";
|
||||
|
||||
// Spawn server binary.
|
||||
let server_bin = cargo_bin("quicnprotochat-server");
|
||||
let child = Command::new(server_bin)
|
||||
.arg("--listen")
|
||||
.arg(&listen)
|
||||
.arg("--data-dir")
|
||||
.arg(&data_dir)
|
||||
.arg("--tls-cert")
|
||||
.arg(&ca_cert)
|
||||
.arg("--tls-key")
|
||||
.arg(&tls_key)
|
||||
.arg("--auth-token")
|
||||
.arg(auth_token)
|
||||
.arg("--allow-insecure-auth")
|
||||
.spawn()
|
||||
.expect("spawn server");
|
||||
|
||||
struct ChildGuard(std::process::Child);
|
||||
impl Drop for ChildGuard {
|
||||
fn drop(&mut self) {
|
||||
let _ = self.0.kill();
|
||||
}
|
||||
}
|
||||
let child_guard = ChildGuard(child);
|
||||
let _ = child_guard;
|
||||
|
||||
wait_for_health(&server, &ca_cert, "localhost").await?;
|
||||
|
||||
init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
|
||||
|
||||
let local = tokio::task::LocalSet::new();
|
||||
let state_path = base.join("user.bin");
|
||||
|
||||
// Register and persist state (includes identity key binding).
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&state_path,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
// Register the user with the bound identity so login can enforce mismatches.
|
||||
let state_bytes = std::fs::read(&state_path)?;
|
||||
let stored_state: StoredStateCompat = bincode::deserialize(&state_bytes)?;
|
||||
let identity_hex = hex::encode(
|
||||
IdentityKeypair::from_seed(stored_state.identity_seed).public_key_bytes(),
|
||||
);
|
||||
|
||||
local
|
||||
.run_until(cmd_register_user(
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
"user1",
|
||||
"pass",
|
||||
Some(&identity_hex),
|
||||
))
|
||||
.await?;
|
||||
|
||||
// Craft an unrelated identity key and attempt login with it.
|
||||
let mut bogus_identity = [0u8; 32];
|
||||
rand::thread_rng().fill_bytes(&mut bogus_identity);
|
||||
let bogus_hex = hex::encode(bogus_identity);
|
||||
|
||||
let result = local
|
||||
.run_until(cmd_login(
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
"user1",
|
||||
"pass",
|
||||
Some(&bogus_hex),
|
||||
None,
|
||||
None,
|
||||
))
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(_) => anyhow::bail!("login unexpectedly succeeded with mismatched identity"),
|
||||
Err(e) => {
|
||||
// Show the full error chain so we can match the server's E016 response.
|
||||
let msg = format!("{e:#}");
|
||||
anyhow::ensure!(
|
||||
msg.contains("identity") || msg.contains("E016"),
|
||||
"login failed but not for identity mismatch: {msg}"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Sealed Sender: enqueue with valid token (no identity binding) succeeds; recipient can fetch.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn e2e_sealed_sender_enqueue_then_fetch() -> anyhow::Result<()> {
|
||||
ensure_rustls_provider();
|
||||
|
||||
let temp = TempDir::new()?;
|
||||
let base = temp.path();
|
||||
let port = pick_unused_port().expect("free port");
|
||||
let listen = format!("127.0.0.1:{port}");
|
||||
let server = listen.clone();
|
||||
let ca_cert = base.join("server-cert.der");
|
||||
let tls_key = base.join("server-key.der");
|
||||
let data_dir = base.join("data");
|
||||
let auth_token = "devtoken";
|
||||
|
||||
let server_bin = cargo_bin("quicnprotochat-server");
|
||||
let child = Command::new(server_bin)
|
||||
.arg("--listen")
|
||||
.arg(&listen)
|
||||
.arg("--data-dir")
|
||||
.arg(&data_dir)
|
||||
.arg("--tls-cert")
|
||||
.arg(&ca_cert)
|
||||
.arg("--tls-key")
|
||||
.arg(&tls_key)
|
||||
.arg("--auth-token")
|
||||
.arg(auth_token)
|
||||
.arg("--allow-insecure-auth")
|
||||
.arg("--sealed-sender")
|
||||
.spawn()
|
||||
.expect("spawn server");
|
||||
|
||||
struct ChildGuard(std::process::Child);
|
||||
impl Drop for ChildGuard {
|
||||
fn drop(&mut self) {
|
||||
let _ = self.0.kill();
|
||||
}
|
||||
}
|
||||
let _child_guard = ChildGuard(child);
|
||||
|
||||
wait_for_health(&server, &ca_cert, "localhost").await?;
|
||||
init_auth(ClientAuth::from_parts(auth_token.to_string(), None));
|
||||
|
||||
let local = tokio::task::LocalSet::new();
|
||||
let state_path = base.join("recipient.bin");
|
||||
|
||||
local
|
||||
.run_until(cmd_register_state(
|
||||
&state_path,
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
let state_bytes = std::fs::read(&state_path)?;
|
||||
let stored: StoredStateCompat = bincode::deserialize(&state_bytes)?;
|
||||
let recipient_key = IdentityKeypair::from_seed(stored.identity_seed).public_key_bytes();
|
||||
let identity_hex = hex_encode(&recipient_key);
|
||||
|
||||
local
|
||||
.run_until(cmd_register_user(
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
"recipient",
|
||||
"pass",
|
||||
Some(&identity_hex),
|
||||
))
|
||||
.await?;
|
||||
|
||||
local
|
||||
.run_until(cmd_login(
|
||||
&server,
|
||||
&ca_cert,
|
||||
"localhost",
|
||||
"recipient",
|
||||
"pass",
|
||||
Some(&identity_hex),
|
||||
None,
|
||||
None,
|
||||
))
|
||||
.await?;
|
||||
|
||||
let client = local.run_until(connect_node(&server, &ca_cert, "localhost")).await?;
|
||||
local
|
||||
.run_until(enqueue(&client, &recipient_key, b"sealed-payload"))
|
||||
.await?;
|
||||
|
||||
let payloads = local
|
||||
.run_until(fetch_wait(&client, &recipient_key, 500))
|
||||
.await?;
|
||||
anyhow::ensure!(
|
||||
payloads.len() == 1 && payloads[0].1.as_slice() == b"sealed-payload",
|
||||
"expected one payload 'sealed-payload', got {:?}",
|
||||
payloads
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
258
crates/quicnprotochat-core/src/app_message.rs
Normal file
258
crates/quicnprotochat-core/src/app_message.rs
Normal file
@@ -0,0 +1,258 @@
|
||||
//! Rich application-layer message format for MLS application payloads.
|
||||
//!
|
||||
//! The server sees only opaque ciphertext; structure lives in this client-defined
|
||||
//! plaintext schema. All messages use: version byte (1) + message_type byte + type-specific payload.
|
||||
//!
|
||||
//! # Message ID
|
||||
//!
|
||||
//! `message_id` is assigned by the sender (16 random bytes) and included in the
|
||||
//! serialized payload for Chat (and implied for Reply/Reaction/ReadReceipt via ref_msg_id).
|
||||
//! Recipients can store message_ids to reference them in replies or reactions.
|
||||
|
||||
use crate::error::CoreError;
|
||||
use rand::RngCore;
|
||||
|
||||
/// Current schema version (written as the first byte of every serialized app message;
/// `parse` rejects any other value).
pub const VERSION: u8 = 1;
|
||||
|
||||
/// Message type discriminant (one byte on the wire).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum MessageType {
    Chat = 0x01,
    Reply = 0x02,
    Reaction = 0x03,
    ReadReceipt = 0x04,
    Typing = 0x05,
}

impl MessageType {
    /// Map a wire byte back to its discriminant; `None` for unknown values,
    /// so unrecognised message types surface as a parse error rather than a panic.
    fn from_byte(b: u8) -> Option<Self> {
        use MessageType::*;
        Some(match b {
            0x01 => Chat,
            0x02 => Reply,
            0x03 => Reaction,
            0x04 => ReadReceipt,
            0x05 => Typing,
            _ => return None,
        })
    }
}
|
||||
|
||||
/// Parsed application message (one of the rich types).
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum AppMessage {
    /// Plain chat: body (UTF-8). message_id is included so recipients can store and reference it.
    Chat {
        /// Sender-assigned 16-byte random id (see [`generate_message_id`]).
        message_id: [u8; 16],
        /// Message body; by convention UTF-8, but carried as raw bytes.
        body: Vec<u8>,
    },
    /// Reply to an earlier message, referenced by that message's id.
    Reply {
        ref_msg_id: [u8; 16],
        body: Vec<u8>,
    },
    /// Emoji reaction to an earlier message.
    Reaction {
        ref_msg_id: [u8; 16],
        /// Raw emoji bytes (UTF-8 by convention), at most 255 bytes on the wire.
        emoji: Vec<u8>,
    },
    /// Read receipt acknowledging the referenced message.
    ReadReceipt {
        msg_id: [u8; 16],
    },
    /// Typing indicator.
    Typing {
        /// 0 = stopped, 1 = typing
        active: u8,
    },
}
|
||||
|
||||
/// Generate a new 16-byte message ID (e.g. for Chat/Reply so recipients can reference it).
///
/// Uses the OS CSPRNG (`OsRng`), so ids are unpredictable and collision
/// probability is negligible for practical message volumes.
pub fn generate_message_id() -> [u8; 16] {
    let mut id = [0u8; 16];
    rand::rngs::OsRng.fill_bytes(&mut id);
    id
}
|
||||
|
||||
// ── Layout (minimal, no Cap'n Proto) ─────────────────────────────────────────
|
||||
//
|
||||
// All messages: [version: 1][type: 1][payload...]
|
||||
//
|
||||
// Chat: [msg_id: 16][body_len: 2 BE][body]
|
||||
// Reply: [ref_msg_id: 16][body_len: 2 BE][body]
|
||||
// Reaction: [ref_msg_id: 16][emoji_len: 1][emoji]
|
||||
// ReadReceipt: [msg_id: 16]
|
||||
// Typing: [active: 1] 0 = stopped, 1 = typing
|
||||
|
||||
/// Serialize a rich message into the application payload format.
|
||||
pub fn serialize(msg_type: MessageType, payload: &[u8]) -> Vec<u8> {
|
||||
let mut out = Vec::with_capacity(2 + payload.len());
|
||||
out.push(VERSION);
|
||||
out.push(msg_type as u8);
|
||||
out.extend_from_slice(payload);
|
||||
out
|
||||
}
|
||||
|
||||
/// Serialize a Chat message (generates message_id internally; pass None to generate, or Some(id) when replying with a known id).
|
||||
pub fn serialize_chat(body: &[u8], message_id: Option<[u8; 16]>) -> Vec<u8> {
|
||||
let id = message_id.unwrap_or_else(generate_message_id);
|
||||
let mut payload = Vec::with_capacity(16 + 2 + body.len());
|
||||
payload.extend_from_slice(&id);
|
||||
payload.extend_from_slice(&(body.len() as u16).to_be_bytes());
|
||||
payload.extend_from_slice(body);
|
||||
serialize(MessageType::Chat, &payload)
|
||||
}
|
||||
|
||||
/// Serialize a Reply message.
|
||||
pub fn serialize_reply(ref_msg_id: [u8; 16], body: &[u8]) -> Vec<u8> {
|
||||
let mut payload = Vec::with_capacity(16 + 2 + body.len());
|
||||
payload.extend_from_slice(&ref_msg_id);
|
||||
payload.extend_from_slice(&(body.len() as u16).to_be_bytes());
|
||||
payload.extend_from_slice(body);
|
||||
serialize(MessageType::Reply, &payload)
|
||||
}
|
||||
|
||||
/// Serialize a Reaction message.
|
||||
pub fn serialize_reaction(ref_msg_id: [u8; 16], emoji: &[u8]) -> Result<Vec<u8>, CoreError> {
|
||||
if emoji.len() > 255 {
|
||||
return Err(CoreError::AppMessage("emoji length > 255".into()));
|
||||
}
|
||||
let mut payload = Vec::with_capacity(16 + 1 + emoji.len());
|
||||
payload.extend_from_slice(&ref_msg_id);
|
||||
payload.push(emoji.len() as u8);
|
||||
payload.extend_from_slice(emoji);
|
||||
Ok(serialize(MessageType::Reaction, &payload))
|
||||
}
|
||||
|
||||
/// Serialize a ReadReceipt message.
///
/// The payload is exactly the 16-byte id of the message being acknowledged.
pub fn serialize_read_receipt(msg_id: [u8; 16]) -> Vec<u8> {
    serialize(MessageType::ReadReceipt, &msg_id)
}
|
||||
|
||||
/// Serialize a Typing message (active: 0 = stopped, 1 = typing).
|
||||
pub fn serialize_typing(active: u8) -> Vec<u8> {
|
||||
let payload = [active];
|
||||
serialize(MessageType::Typing, &payload)
|
||||
}
|
||||
|
||||
/// Parse bytes into (MessageType, AppMessage). Fails if version/type unknown or payload too short.
|
||||
pub fn parse(bytes: &[u8]) -> Result<(MessageType, AppMessage), CoreError> {
|
||||
if bytes.len() < 2 {
|
||||
return Err(CoreError::AppMessage("payload too short (need version + type)".into()));
|
||||
}
|
||||
let version = bytes[0];
|
||||
if version != VERSION {
|
||||
return Err(CoreError::AppMessage(format!("unsupported version {version}").into()));
|
||||
}
|
||||
let msg_type = MessageType::from_byte(bytes[1])
|
||||
.ok_or_else(|| CoreError::AppMessage(format!("unknown message type {}", bytes[1]).into()))?;
|
||||
let payload = &bytes[2..];
|
||||
|
||||
let app = match msg_type {
|
||||
MessageType::Chat => parse_chat(payload)?,
|
||||
MessageType::Reply => parse_reply(payload)?,
|
||||
MessageType::Reaction => parse_reaction(payload)?,
|
||||
MessageType::ReadReceipt => parse_read_receipt(payload)?,
|
||||
MessageType::Typing => parse_typing(payload)?,
|
||||
};
|
||||
Ok((msg_type, app))
|
||||
}
|
||||
|
||||
fn parse_chat(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 + 2 {
|
||||
return Err(CoreError::AppMessage("Chat payload too short".into()));
|
||||
}
|
||||
let mut message_id = [0u8; 16];
|
||||
message_id.copy_from_slice(&payload[..16]);
|
||||
let body_len = u16::from_be_bytes([payload[16], payload[17]]) as usize;
|
||||
if payload.len() < 18 + body_len {
|
||||
return Err(CoreError::AppMessage("Chat body length exceeds payload".into()));
|
||||
}
|
||||
let body = payload[18..18 + body_len].to_vec();
|
||||
Ok(AppMessage::Chat { message_id, body })
|
||||
}
|
||||
|
||||
fn parse_reply(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 + 2 {
|
||||
return Err(CoreError::AppMessage("Reply payload too short".into()));
|
||||
}
|
||||
let mut ref_msg_id = [0u8; 16];
|
||||
ref_msg_id.copy_from_slice(&payload[..16]);
|
||||
let body_len = u16::from_be_bytes([payload[16], payload[17]]) as usize;
|
||||
if payload.len() < 18 + body_len {
|
||||
return Err(CoreError::AppMessage("Reply body length exceeds payload".into()));
|
||||
}
|
||||
let body = payload[18..18 + body_len].to_vec();
|
||||
Ok(AppMessage::Reply { ref_msg_id, body })
|
||||
}
|
||||
|
||||
fn parse_reaction(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 + 1 {
|
||||
return Err(CoreError::AppMessage("Reaction payload too short".into()));
|
||||
}
|
||||
let mut ref_msg_id = [0u8; 16];
|
||||
ref_msg_id.copy_from_slice(&payload[..16]);
|
||||
let emoji_len = payload[16] as usize;
|
||||
if payload.len() < 17 + emoji_len {
|
||||
return Err(CoreError::AppMessage("Reaction emoji length exceeds payload".into()));
|
||||
}
|
||||
let emoji = payload[17..17 + emoji_len].to_vec();
|
||||
Ok(AppMessage::Reaction { ref_msg_id, emoji })
|
||||
}
|
||||
|
||||
fn parse_read_receipt(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.len() < 16 {
|
||||
return Err(CoreError::AppMessage("ReadReceipt payload too short".into()));
|
||||
}
|
||||
let mut msg_id = [0u8; 16];
|
||||
msg_id.copy_from_slice(&payload[..16]);
|
||||
Ok(AppMessage::ReadReceipt { msg_id })
|
||||
}
|
||||
|
||||
fn parse_typing(payload: &[u8]) -> Result<AppMessage, CoreError> {
|
||||
if payload.is_empty() {
|
||||
return Err(CoreError::AppMessage("Typing payload empty".into()));
|
||||
}
|
||||
Ok(AppMessage::Typing { active: payload[0] })
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Chat frames survive a serialize → parse round trip.
    #[test]
    fn roundtrip_chat() {
        let encoded = serialize_chat(b"hello", None);
        let (kind, decoded) = parse(&encoded).expect("chat parses");
        assert_eq!(kind, MessageType::Chat);
        match decoded {
            AppMessage::Chat { body, .. } => assert_eq!(body, b"hello"),
            other => panic!("expected Chat, got {other:?}"),
        }
    }

    /// Reply frames carry both the referenced id and the body intact.
    #[test]
    fn roundtrip_reply() {
        let ref_id = [1u8; 16];
        let encoded = serialize_reply(ref_id, b"reply text");
        let (kind, decoded) = parse(&encoded).expect("reply parses");
        assert_eq!(kind, MessageType::Reply);
        match decoded {
            AppMessage::Reply { ref_msg_id, body } => {
                assert_eq!(ref_msg_id, ref_id);
                assert_eq!(body, b"reply text");
            }
            other => panic!("expected Reply, got {other:?}"),
        }
    }

    /// Typing indicator round-trips its single status byte.
    #[test]
    fn roundtrip_typing() {
        let encoded = serialize_typing(1);
        let (kind, decoded) = parse(&encoded).expect("typing parses");
        assert_eq!(kind, MessageType::Typing);
        assert!(matches!(decoded, AppMessage::Typing { active: 1 }));
    }
}
|
||||
@@ -18,4 +18,12 @@ pub enum CoreError {
|
||||
/// A hybrid KEM (X25519 + ML-KEM-768) operation failed.
|
||||
#[error("hybrid KEM error: {0}")]
|
||||
HybridKem(#[from] crate::hybrid_kem::HybridKemError),
|
||||
|
||||
/// IO or persistence failure.
|
||||
#[error("io error: {0}")]
|
||||
Io(String),
|
||||
|
||||
/// Application message (rich payload) parse or serialisation error.
|
||||
#[error("app message: {0}")]
|
||||
AppMessage(String),
|
||||
}
|
||||
|
||||
@@ -25,7 +25,7 @@
|
||||
//! in Welcome messages. `new_from_welcome` is called with `ratchet_tree = None`;
|
||||
//! openmls extracts the tree from the Welcome's `GroupInfo` extension.
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::{path::Path, sync::Arc};
|
||||
|
||||
use openmls::prelude::{
|
||||
Ciphersuite, Credential, CredentialType, CredentialWithKey, CryptoConfig, GroupId, KeyPackage,
|
||||
@@ -78,6 +78,16 @@ impl GroupMember {
|
||||
Self::new_with_state(identity, DiskKeyStore::ephemeral(), None)
|
||||
}
|
||||
|
||||
/// Create a `GroupMember` with a persistent keystore at `path`.
///
/// # Errors
/// Returns `CoreError::Io` if the on-disk keystore cannot be opened or created.
pub fn new_persistent(
    identity: Arc<IdentityKeypair>,
    path: impl AsRef<Path>,
) -> Result<Self, CoreError> {
    let key_store = DiskKeyStore::persistent(path)
        .map_err(|e| CoreError::Io(format!("keystore: {e}")))?;
    // No pre-existing group state: the member creates or joins a group later.
    Ok(Self::new_with_state(identity, key_store, None))
}
|
||||
|
||||
/// Create a `GroupMember` from pre-existing state (identity + optional group + store).
|
||||
pub fn new_with_state(
|
||||
identity: Arc<IdentityKeypair>,
|
||||
@@ -332,6 +342,58 @@ impl GroupMember {
|
||||
}
|
||||
}
|
||||
|
||||
/// Process an incoming TLS-encoded MLS message and return sender identity + plaintext for application messages.
///
/// Same as [`receive_message`], but for Application messages returns
/// `Some((sender_identity_bytes, plaintext))` so the client can display who sent the message.
/// `sender_identity_bytes` is the MLS credential identity (e.g. Ed25519 public key for Basic credential).
///
/// Returns `Ok(None)` for Commit and Proposal messages (group state is updated internally).
///
/// # Errors
/// Returns `CoreError::Mls` when no group is active, when the bytes do not
/// deserialize to a protocol message, or when openmls rejects the message.
pub fn receive_message_with_sender(
    &mut self,
    mut bytes: &[u8],
) -> Result<Option<(Vec<u8>, Vec<u8>)>, CoreError> {
    let group = self
        .group
        .as_mut()
        .ok_or_else(|| CoreError::Mls("no active group".into()))?;

    // `tls_deserialize` advances the slice cursor, hence `mut bytes`.
    let msg_in = openmls::prelude::MlsMessageIn::tls_deserialize(&mut bytes)
        .map_err(|e| CoreError::Mls(format!("message deserialise: {e:?}")))?;

    // Only Private/Public protocol messages are accepted here; Welcome,
    // KeyPackage etc. are handled by other entry points.
    let protocol_message = match msg_in.extract() {
        MlsMessageInBody::PrivateMessage(m) => ProtocolMessage::PrivateMessage(m),
        MlsMessageInBody::PublicMessage(m) => ProtocolMessage::PublicMessage(m),
        _ => return Err(CoreError::Mls("not a protocol message".into())),
    };

    let processed = group
        .process_message(&self.backend, protocol_message)
        .map_err(|e| CoreError::Mls(format!("process_message: {e:?}")))?;

    // Capture the sender identity before `into_content` consumes `processed`.
    let sender_identity = processed.credential().identity().to_vec();

    match processed.into_content() {
        ProcessedMessageContent::ApplicationMessage(app) => {
            Ok(Some((sender_identity, app.into_bytes())))
        }
        ProcessedMessageContent::StagedCommitMessage(staged) => {
            // Commits advance the group epoch; merge immediately so
            // subsequent messages in the new epoch can be decrypted.
            group
                .merge_staged_commit(&self.backend, *staged)
                .map_err(|e| CoreError::Mls(format!("merge_staged_commit: {e:?}")))?;
            Ok(None)
        }
        ProcessedMessageContent::ProposalMessage(proposal) => {
            group.store_pending_proposal(*proposal);
            Ok(None)
        }
        ProcessedMessageContent::ExternalJoinProposalMessage(proposal) => {
            group.store_pending_proposal(*proposal);
            Ok(None)
        }
    }
}
|
||||
|
||||
// ── Accessors ─────────────────────────────────────────────────────────────
|
||||
|
||||
/// Return the MLS group ID bytes, or `None` if no group is active.
|
||||
@@ -398,45 +460,42 @@ impl GroupMember {
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
/// Full two-party MLS round-trip: creator creates group, adds joiner, then they exchange messages.
#[test]
fn two_party_mls_round_trip() {
    let creator_id = Arc::new(IdentityKeypair::generate());
    let joiner_id = Arc::new(IdentityKeypair::generate());

    let mut creator = GroupMember::new(Arc::clone(&creator_id));
    let mut joiner = GroupMember::new(Arc::clone(&joiner_id));

    // Joiner generates a KeyPackage (stored in joiner's backend key store).
    let joiner_kp = joiner
        .generate_key_package()
        .expect("joiner KeyPackage");

    // Creator starts the group.
    creator
        .create_group(b"test-group-m3")
        .expect("creator create group");

    // Creator adds joiner -> (commit, welcome).
    // Creator is the sole existing member, so the commit is merged locally.
    let (_, welcome) = creator
        .add_member(&joiner_kp)
        .expect("creator add joiner");

    // Joiner joins via the Welcome; their backend holds the matching init key.
    joiner.join_group(&welcome).expect("joiner join group");

    // Creator -> joiner: application message.
    let ct_creator = creator.send_message(b"hello").expect("creator send");
    let pt_joiner = joiner
        .receive_message(&ct_creator)
        .expect("joiner recv")
        .expect("application message");
    assert_eq!(pt_joiner, b"hello");

    // Joiner -> creator: reply.
    let ct_joiner = joiner.send_message(b"hello back").expect("joiner send");
    let pt_creator = creator
        .receive_message(&ct_joiner)
        .expect("creator recv")
        .expect("application message");
    assert_eq!(pt_creator, b"hello back");
}
|
||||
|
||||
/// `group_id()` returns None before create_group, Some afterwards.
|
||||
|
||||
442
crates/quicnprotochat-core/src/hybrid_crypto.rs
Normal file
442
crates/quicnprotochat-core/src/hybrid_crypto.rs
Normal file
@@ -0,0 +1,442 @@
|
||||
//! Post-quantum hybrid crypto provider for OpenMLS (M7 PoC).
|
||||
//!
|
||||
//! Uses X25519 + ML-KEM-768 hybrid KEM for HPKE operations where openmls
|
||||
//! would use DHKEM(X25519), and delegates all other operations (AEAD, hash,
|
||||
//! signatures, KDF, randomness) to `openmls_rust_crypto::RustCrypto`.
|
||||
//!
|
||||
//! # Key format
|
||||
//!
|
||||
//! When the provider sees a **hybrid public key** (length `HYBRID_PUBLIC_KEY_LEN` =
|
||||
//! 32 + 1184 bytes) or **hybrid private key** (length `HYBRID_PRIVATE_KEY_LEN` =
|
||||
//! 32 + 2400 bytes), it uses `hybrid_kem` for HPKE. Otherwise it delegates to
|
||||
//! RustCrypto (classical X25519 HPKE).
|
||||
//!
|
||||
//! # MLS compatibility
|
||||
//!
|
||||
//! The current MLS ciphersuite (MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519)
|
||||
//! uses 32-byte X25519 init keys in the wire format. This provider can produce
|
||||
//! and consume **hybrid** init keys (1216-byte public, 2432-byte private), but
|
||||
//! that is a non-standard extension: other MLS implementations will not
|
||||
//! accept KeyPackages with hybrid init keys unless they implement the same
|
||||
//! extension. This PoC validates that the OpenMLS trait surface is satisfiable
|
||||
//! with a custom HPKE backend; full interoperability would require a new
|
||||
//! ciphersuite or protocol extension.
|
||||
|
||||
use openmls_rust_crypto::RustCrypto;
|
||||
use openmls_traits::{
|
||||
crypto::OpenMlsCrypto,
|
||||
types::{
|
||||
CryptoError, ExporterSecret, HpkeCiphertext, HpkeConfig, HpkeKeyPair, HpkeKemType,
|
||||
},
|
||||
OpenMlsCryptoProvider,
|
||||
};
|
||||
use tls_codec::SecretVLBytes;
|
||||
|
||||
use crate::hybrid_kem::{
|
||||
hybrid_decapsulate_only, hybrid_decrypt, hybrid_encapsulate_only, hybrid_encrypt,
|
||||
hybrid_export, HybridKeypair, HybridPublicKey,
|
||||
HYBRID_KEM_OUTPUT_LEN, HYBRID_PRIVATE_KEY_LEN, HYBRID_PUBLIC_KEY_LEN,
|
||||
};
|
||||
use crate::keystore::DiskKeyStore;
|
||||
|
||||
// Re-export types used by OpenMlsCrypto (full path for clarity).
|
||||
use openmls_traits::types::{
|
||||
AeadType, Ciphersuite, HashType, SignatureScheme,
|
||||
};
|
||||
|
||||
/// Crypto backend that uses hybrid KEM for HPKE when keys are in hybrid format,
|
||||
/// and delegates everything else to RustCrypto.
|
||||
#[derive(Debug)]
|
||||
pub struct HybridCrypto {
|
||||
rust_crypto: RustCrypto,
|
||||
}
|
||||
|
||||
impl HybridCrypto {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
rust_crypto: RustCrypto::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Expose the underlying RustCrypto for rand() and delegation.
|
||||
pub fn rust_crypto(&self) -> &RustCrypto {
|
||||
&self.rust_crypto
|
||||
}
|
||||
|
||||
fn is_hybrid_public_key(pk_r: &[u8]) -> bool {
|
||||
pk_r.len() == HYBRID_PUBLIC_KEY_LEN
|
||||
}
|
||||
|
||||
fn is_hybrid_private_key(sk_r: &[u8]) -> bool {
|
||||
sk_r.len() == HYBRID_PRIVATE_KEY_LEN
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for HybridCrypto {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl OpenMlsCrypto for HybridCrypto {
|
||||
fn supports(&self, ciphersuite: Ciphersuite) -> Result<(), CryptoError> {
|
||||
self.rust_crypto.supports(ciphersuite)
|
||||
}
|
||||
|
||||
fn supported_ciphersuites(&self) -> Vec<Ciphersuite> {
|
||||
self.rust_crypto.supported_ciphersuites()
|
||||
}
|
||||
|
||||
fn hkdf_extract(
|
||||
&self,
|
||||
hash_type: HashType,
|
||||
salt: &[u8],
|
||||
ikm: &[u8],
|
||||
) -> Result<SecretVLBytes, CryptoError> {
|
||||
self.rust_crypto.hkdf_extract(hash_type, salt, ikm)
|
||||
}
|
||||
|
||||
fn hkdf_expand(
|
||||
&self,
|
||||
hash_type: HashType,
|
||||
prk: &[u8],
|
||||
info: &[u8],
|
||||
okm_len: usize,
|
||||
) -> Result<SecretVLBytes, CryptoError> {
|
||||
self.rust_crypto.hkdf_expand(hash_type, prk, info, okm_len)
|
||||
}
|
||||
|
||||
fn hash(&self, hash_type: HashType, data: &[u8]) -> Result<Vec<u8>, CryptoError> {
|
||||
self.rust_crypto.hash(hash_type, data)
|
||||
}
|
||||
|
||||
fn aead_encrypt(
|
||||
&self,
|
||||
alg: AeadType,
|
||||
key: &[u8],
|
||||
data: &[u8],
|
||||
nonce: &[u8],
|
||||
aad: &[u8],
|
||||
) -> Result<Vec<u8>, CryptoError> {
|
||||
self.rust_crypto.aead_encrypt(alg, key, data, nonce, aad)
|
||||
}
|
||||
|
||||
fn aead_decrypt(
|
||||
&self,
|
||||
alg: AeadType,
|
||||
key: &[u8],
|
||||
ct_tag: &[u8],
|
||||
nonce: &[u8],
|
||||
aad: &[u8],
|
||||
) -> Result<Vec<u8>, CryptoError> {
|
||||
self.rust_crypto.aead_decrypt(alg, key, ct_tag, nonce, aad)
|
||||
}
|
||||
|
||||
fn signature_key_gen(&self, alg: SignatureScheme) -> Result<(Vec<u8>, Vec<u8>), CryptoError> {
|
||||
self.rust_crypto.signature_key_gen(alg)
|
||||
}
|
||||
|
||||
fn verify_signature(
|
||||
&self,
|
||||
alg: SignatureScheme,
|
||||
data: &[u8],
|
||||
pk: &[u8],
|
||||
signature: &[u8],
|
||||
) -> Result<(), CryptoError> {
|
||||
self.rust_crypto.verify_signature(alg, data, pk, signature)
|
||||
}
|
||||
|
||||
fn sign(&self, alg: SignatureScheme, data: &[u8], key: &[u8]) -> Result<Vec<u8>, CryptoError> {
|
||||
self.rust_crypto.sign(alg, data, key)
|
||||
}
|
||||
|
||||
fn hpke_seal(
|
||||
&self,
|
||||
config: HpkeConfig,
|
||||
pk_r: &[u8],
|
||||
info: &[u8],
|
||||
aad: &[u8],
|
||||
ptxt: &[u8],
|
||||
) -> HpkeCiphertext {
|
||||
if Self::is_hybrid_public_key(pk_r) {
|
||||
let recipient_pk = match HybridPublicKey::from_bytes(pk_r) {
|
||||
Ok(pk) => pk,
|
||||
Err(_) => return self.rust_crypto.hpke_seal(config, pk_r, info, aad, ptxt),
|
||||
};
|
||||
match hybrid_encrypt(&recipient_pk, ptxt) {
|
||||
Ok(envelope) => {
|
||||
let kem_output = envelope[..HYBRID_KEM_OUTPUT_LEN].to_vec();
|
||||
let ciphertext = envelope[HYBRID_KEM_OUTPUT_LEN..].to_vec();
|
||||
HpkeCiphertext {
|
||||
kem_output: kem_output.into(),
|
||||
ciphertext: ciphertext.into(),
|
||||
}
|
||||
}
|
||||
Err(_) => self.rust_crypto.hpke_seal(config, pk_r, info, aad, ptxt),
|
||||
}
|
||||
} else {
|
||||
self.rust_crypto.hpke_seal(config, pk_r, info, aad, ptxt)
|
||||
}
|
||||
}
|
||||
|
||||
fn hpke_open(
|
||||
&self,
|
||||
config: HpkeConfig,
|
||||
input: &HpkeCiphertext,
|
||||
sk_r: &[u8],
|
||||
info: &[u8],
|
||||
aad: &[u8],
|
||||
) -> Result<Vec<u8>, CryptoError> {
|
||||
if Self::is_hybrid_private_key(sk_r) {
|
||||
let keypair = match HybridKeypair::from_private_bytes(sk_r) {
|
||||
Ok(kp) => kp,
|
||||
Err(_) => return self.rust_crypto.hpke_open(config, input, sk_r, info, aad),
|
||||
};
|
||||
let envelope: Vec<u8> = input
|
||||
.kem_output.as_slice()
|
||||
.iter()
|
||||
.chain(input.ciphertext.as_slice())
|
||||
.copied()
|
||||
.collect();
|
||||
hybrid_decrypt(&keypair, &envelope).map_err(|_| CryptoError::HpkeDecryptionError)
|
||||
} else {
|
||||
self.rust_crypto.hpke_open(config, input, sk_r, info, aad)
|
||||
}
|
||||
}
|
||||
|
||||
fn hpke_setup_sender_and_export(
|
||||
&self,
|
||||
config: HpkeConfig,
|
||||
pk_r: &[u8],
|
||||
info: &[u8],
|
||||
exporter_context: &[u8],
|
||||
exporter_length: usize,
|
||||
) -> Result<(Vec<u8>, ExporterSecret), CryptoError> {
|
||||
if Self::is_hybrid_public_key(pk_r) {
|
||||
let recipient_pk = match HybridPublicKey::from_bytes(pk_r) {
|
||||
Ok(pk) => pk,
|
||||
Err(_) => {
|
||||
return self.rust_crypto.hpke_setup_sender_and_export(
|
||||
config, pk_r, info, exporter_context, exporter_length,
|
||||
)
|
||||
}
|
||||
};
|
||||
let (kem_output, shared_secret) =
|
||||
hybrid_encapsulate_only(&recipient_pk).map_err(|_| CryptoError::SenderSetupError)?;
|
||||
let exported = hybrid_export(&shared_secret, exporter_context, exporter_length);
|
||||
Ok((kem_output, exported.into()))
|
||||
} else {
|
||||
self.rust_crypto.hpke_setup_sender_and_export(
|
||||
config, pk_r, info, exporter_context, exporter_length,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
fn hpke_setup_receiver_and_export(
|
||||
&self,
|
||||
config: HpkeConfig,
|
||||
enc: &[u8],
|
||||
sk_r: &[u8],
|
||||
info: &[u8],
|
||||
exporter_context: &[u8],
|
||||
exporter_length: usize,
|
||||
) -> Result<ExporterSecret, CryptoError> {
|
||||
if Self::is_hybrid_private_key(sk_r) {
|
||||
let keypair = HybridKeypair::from_private_bytes(sk_r)
|
||||
.map_err(|_| CryptoError::ReceiverSetupError)?;
|
||||
let shared_secret =
|
||||
hybrid_decapsulate_only(&keypair, enc).map_err(|_| CryptoError::ReceiverSetupError)?;
|
||||
let exported = hybrid_export(&shared_secret, exporter_context, exporter_length);
|
||||
Ok(exported.into())
|
||||
} else {
|
||||
self.rust_crypto.hpke_setup_receiver_and_export(
|
||||
config, enc, sk_r, info, exporter_context, exporter_length,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
fn derive_hpke_keypair(&self, config: HpkeConfig, ikm: &[u8]) -> HpkeKeyPair {
|
||||
if config.0 == HpkeKemType::DhKem25519 {
|
||||
let kp = HybridKeypair::derive_from_ikm(ikm);
|
||||
HpkeKeyPair {
|
||||
private: kp.private_to_bytes().into(),
|
||||
public: kp.public_key().to_bytes(),
|
||||
}
|
||||
} else {
|
||||
self.rust_crypto.derive_hpke_keypair(config, ikm)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// OpenMLS crypto provider that uses hybrid KEM for HPKE (when keys are in
|
||||
/// hybrid format) and delegates the rest to RustCrypto.
|
||||
#[derive(Debug)]
|
||||
pub struct HybridCryptoProvider {
|
||||
crypto: HybridCrypto,
|
||||
key_store: DiskKeyStore,
|
||||
}
|
||||
|
||||
impl HybridCryptoProvider {
|
||||
pub fn new(key_store: DiskKeyStore) -> Self {
|
||||
Self {
|
||||
crypto: HybridCrypto::new(),
|
||||
key_store,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for HybridCryptoProvider {
|
||||
fn default() -> Self {
|
||||
Self::new(DiskKeyStore::ephemeral())
|
||||
}
|
||||
}
|
||||
|
||||
impl OpenMlsCryptoProvider for HybridCryptoProvider {
|
||||
type CryptoProvider = HybridCrypto;
|
||||
type RandProvider = RustCrypto;
|
||||
type KeyStoreProvider = DiskKeyStore;
|
||||
|
||||
fn crypto(&self) -> &Self::CryptoProvider {
|
||||
&self.crypto
|
||||
}
|
||||
|
||||
fn rand(&self) -> &Self::RandProvider {
|
||||
self.crypto.rust_crypto()
|
||||
}
|
||||
|
||||
fn key_store(&self) -> &Self::KeyStoreProvider {
|
||||
&self.key_store
|
||||
}
|
||||
}
|
||||
|
||||
// ── Tests ───────────────────────────────────────────────────────────────────
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use openmls_traits::types::HpkeKdfType;

    /// The DHKEM(X25519) HPKE config — the one that triggers the hybrid path.
    fn hpke_config_dhkem_x25519() -> HpkeConfig {
        HpkeConfig(
            HpkeKemType::DhKem25519,
            HpkeKdfType::HkdfSha256,
            openmls_traits::types::HpkeAeadType::AesGcm128,
        )
    }

    /// HPKE path with hybrid keys: derive_hpke_keypair (hybrid) -> hpke_seal -> hpke_open.
    #[test]
    fn hybrid_hpke_seal_open_round_trip() {
        let backend = HybridCrypto::new();

        let kp = backend.derive_hpke_keypair(
            hpke_config_dhkem_x25519(),
            b"test-ikm-for-hybrid-hpke-keypair",
        );
        assert_eq!(kp.public.len(), HYBRID_PUBLIC_KEY_LEN);
        assert_eq!(kp.private.as_ref().len(), HYBRID_PRIVATE_KEY_LEN);

        let plaintext: &[u8] = b"hello post-quantum MLS";
        let info: &[u8] = b"mls 1.0 test";
        let aad: &[u8] = b"additional data";

        let ct = backend.hpke_seal(hpke_config_dhkem_x25519(), &kp.public, info, aad, plaintext);
        assert!(!ct.kem_output.as_slice().is_empty());
        assert!(!ct.ciphertext.as_slice().is_empty());

        let decrypted = backend
            .hpke_open(
                hpke_config_dhkem_x25519(),
                &ct,
                kp.private.as_ref(),
                info,
                aad,
            )
            .expect("hpke_open with hybrid keys");
        assert_eq!(decrypted.as_slice(), plaintext);
    }

    /// HPKE exporter path: setup_sender_and_export then setup_receiver_and_export.
    #[test]
    fn hybrid_hpke_setup_sender_receiver_export() {
        let backend = HybridCrypto::new();
        let kp = backend.derive_hpke_keypair(hpke_config_dhkem_x25519(), b"exporter-ikm");

        let info: &[u8] = b"";
        let exporter_context: &[u8] = b"MLS 1.0 external init";
        let exporter_length = 32;

        let (kem_output, sender_exported) = backend
            .hpke_setup_sender_and_export(
                hpke_config_dhkem_x25519(),
                &kp.public,
                info,
                exporter_context,
                exporter_length,
            )
            .expect("sender and export");

        assert_eq!(kem_output.len(), HYBRID_KEM_OUTPUT_LEN);
        assert_eq!(sender_exported.as_ref().len(), exporter_length);

        // The receiver must derive exactly the same exported secret.
        let receiver_exported = backend
            .hpke_setup_receiver_and_export(
                hpke_config_dhkem_x25519(),
                &kem_output,
                kp.private.as_ref(),
                info,
                exporter_context,
                exporter_length,
            )
            .expect("receiver and export");

        assert_eq!(sender_exported.as_ref(), receiver_exported.as_ref());
    }

    /// KeyPackage generation with HybridCryptoProvider (validates full HPKE path in MLS).
    #[test]
    fn key_package_generation_with_hybrid_provider() {
        use openmls::prelude::{
            Credential, CredentialType, CredentialWithKey, CryptoConfig, KeyPackage,
        };
        use std::sync::Arc;
        use tls_codec::Serialize;

        use crate::identity::IdentityKeypair;

        const CIPHERSUITE: Ciphersuite =
            Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;

        let provider = HybridCryptoProvider::default();
        let identity = Arc::new(IdentityKeypair::generate());

        // Basic credential: identity bytes are the raw Ed25519 public key.
        let credential = Credential::new(
            identity.public_key_bytes().to_vec(),
            CredentialType::Basic,
        )
        .unwrap();
        let credential_with_key = CredentialWithKey {
            credential,
            signature_key: identity.public_key_bytes().to_vec().into(),
        };

        let key_package = KeyPackage::builder()
            .build(
                CryptoConfig::with_default_version(CIPHERSUITE),
                &provider,
                identity.as_ref(),
                credential_with_key,
            )
            .expect("KeyPackage with hybrid HPKE");

        // A serialisable KeyPackage proves the hybrid init key was accepted.
        let serialized = key_package
            .tls_serialize_detached()
            .expect("serialize KeyPackage");
        assert!(!serialized.is_empty());
    }
}
|
||||
@@ -28,7 +28,7 @@ use ml_kem::{
|
||||
kem::{Decapsulate, Encapsulate},
|
||||
EncodedSizeUser, KemCore, MlKem768, MlKem768Params,
|
||||
};
|
||||
use rand::{rngs::OsRng, RngCore};
|
||||
use rand::{rngs::OsRng, rngs::StdRng, CryptoRng, RngCore, SeedableRng};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::Sha256;
|
||||
use x25519_dalek::{EphemeralSecret, PublicKey as X25519Public, StaticSecret};
|
||||
@@ -55,6 +55,15 @@ pub const MLKEM_DK_LEN: usize = 2400;
|
||||
/// Envelope header: version(1) + x25519 eph pk(32) + mlkem ct(1088) + nonce(12).
|
||||
const HEADER_LEN: usize = 1 + 32 + MLKEM_CT_LEN + 12;
|
||||
|
||||
/// KEM output length (version + x25519 eph pk + mlkem ct) for HPKE adapter.
|
||||
pub const HYBRID_KEM_OUTPUT_LEN: usize = 1 + 32 + MLKEM_CT_LEN;
|
||||
|
||||
/// Hybrid public key length: x25519(32) + mlkem_ek(1184). Used to detect hybrid keys in MLS.
|
||||
pub const HYBRID_PUBLIC_KEY_LEN: usize = 32 + MLKEM_EK_LEN;
|
||||
|
||||
/// Hybrid private key length: x25519(32) + mlkem_dk(2400). Used to detect hybrid keys in MLS.
|
||||
pub const HYBRID_PRIVATE_KEY_LEN: usize = 32 + MLKEM_DK_LEN;
|
||||
|
||||
// ── Error type ──────────────────────────────────────────────────────────────
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
@@ -109,12 +118,20 @@ pub struct HybridPublicKey {
|
||||
pub mlkem_ek: Vec<u8>,
|
||||
}
|
||||
|
||||
/// HKDF info for deriving HPKE keypair seed from IKM (MLS compatibility).
|
||||
const HKDF_INFO_HPKE_KEYPAIR: &[u8] = b"quicnprotochat-hybrid-hpke-keypair-v1";
|
||||
|
||||
impl HybridKeypair {
|
||||
/// Generate a fresh hybrid keypair from OS CSPRNG.
|
||||
pub fn generate() -> Self {
|
||||
let x25519_sk = StaticSecret::random_from_rng(OsRng);
|
||||
Self::generate_from_rng(&mut OsRng)
|
||||
}
|
||||
|
||||
/// Generate a hybrid keypair from a seeded RNG (deterministic).
|
||||
pub fn generate_from_rng<R: RngCore + CryptoRng>(rng: &mut R) -> Self {
|
||||
let x25519_sk = StaticSecret::random_from_rng(&mut *rng);
|
||||
let x25519_pk = X25519Public::from(&x25519_sk);
|
||||
let (mlkem_dk, mlkem_ek) = MlKem768::generate(&mut OsRng);
|
||||
let (mlkem_dk, mlkem_ek) = MlKem768::generate(rng);
|
||||
|
||||
Self {
|
||||
x25519_sk,
|
||||
@@ -124,6 +141,45 @@ impl HybridKeypair {
|
||||
}
|
||||
}
|
||||
|
||||
/// Derive a deterministic hybrid keypair from IKM (for MLS HPKE key schedule).
|
||||
pub fn derive_from_ikm(ikm: &[u8]) -> Self {
|
||||
let mut seed = [0u8; 32];
|
||||
let hk = Hkdf::<Sha256>::new(None, ikm);
|
||||
hk.expand(HKDF_INFO_HPKE_KEYPAIR, &mut seed)
|
||||
.expect("32 bytes is valid HKDF output");
|
||||
let mut rng = StdRng::from_seed(seed);
|
||||
Self::generate_from_rng(&mut rng)
|
||||
}
|
||||
|
||||
/// Serialise private key for MLS key store: x25519_sk(32) || mlkem_dk(2400).
|
||||
pub fn private_to_bytes(&self) -> Vec<u8> {
|
||||
let mut out = Vec::with_capacity(HYBRID_PRIVATE_KEY_LEN);
|
||||
out.extend_from_slice(self.x25519_sk.as_bytes());
|
||||
out.extend_from_slice(self.mlkem_dk.as_bytes().as_slice());
|
||||
out
|
||||
}
|
||||
|
||||
/// Reconstruct a hybrid keypair from private key bytes (from MLS key store).
|
||||
pub fn from_private_bytes(bytes: &[u8]) -> Result<Self, HybridKemError> {
|
||||
if bytes.len() != HYBRID_PRIVATE_KEY_LEN {
|
||||
return Err(HybridKemError::TooShort(bytes.len()));
|
||||
}
|
||||
let x25519_sk = StaticSecret::from(<[u8; 32]>::try_from(&bytes[0..32]).unwrap());
|
||||
let x25519_pk = X25519Public::from(&x25519_sk);
|
||||
|
||||
let mlkem_dk_arr = Array::try_from(&bytes[32..32 + MLKEM_DK_LEN])
|
||||
.map_err(|_| HybridKemError::InvalidMlKemKey)?;
|
||||
let mlkem_dk = DecapsulationKey::<MlKem768Params>::from_bytes(&mlkem_dk_arr);
|
||||
let mlkem_ek = mlkem_dk.encapsulation_key().clone();
|
||||
|
||||
Ok(Self {
|
||||
x25519_sk,
|
||||
x25519_pk,
|
||||
mlkem_dk,
|
||||
mlkem_ek,
|
||||
})
|
||||
}
|
||||
|
||||
/// Reconstruct from serialised bytes.
|
||||
pub fn from_bytes(bytes: &HybridKeypairBytes) -> Result<Self, HybridKemError> {
|
||||
let x25519_sk = StaticSecret::from(*bytes.x25519_sk);
|
||||
@@ -290,6 +346,78 @@ pub fn hybrid_decrypt(keypair: &HybridKeypair, envelope: &[u8]) -> Result<Vec<u8
|
||||
Ok(plaintext)
|
||||
}
|
||||
|
||||
/// Encapsulate only: compute shared secret and KEM output (no AEAD).
|
||||
/// Returns `(kem_output, shared_secret)` where `kem_output` is the first
|
||||
/// `HYBRID_KEM_OUTPUT_LEN` bytes of the hybrid envelope and `shared_secret`
|
||||
/// is the 32-byte derived key (same as used for AEAD in `hybrid_encrypt`).
|
||||
/// Used by MLS HPKE exporter (setup_sender_and_export).
|
||||
pub fn hybrid_encapsulate_only(
|
||||
recipient_pk: &HybridPublicKey,
|
||||
) -> Result<(Vec<u8>, [u8; 32]), HybridKemError> {
|
||||
let eph_secret = EphemeralSecret::random_from_rng(OsRng);
|
||||
let eph_public = X25519Public::from(&eph_secret);
|
||||
let x25519_recipient = X25519Public::from(recipient_pk.x25519_pk);
|
||||
let x25519_ss = eph_secret.diffie_hellman(&x25519_recipient);
|
||||
|
||||
let mlkem_ek_arr = Array::try_from(recipient_pk.mlkem_ek.as_slice())
|
||||
.map_err(|_| HybridKemError::InvalidMlKemKey)?;
|
||||
let mlkem_ek = EncapsulationKey::<MlKem768Params>::from_bytes(&mlkem_ek_arr);
|
||||
let (mlkem_ct, mlkem_ss) = mlkem_ek
|
||||
.encapsulate(&mut OsRng)
|
||||
.map_err(|_| HybridKemError::EncryptionFailed)?;
|
||||
|
||||
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice());
|
||||
let shared_secret = aead_key.as_slice().try_into().unwrap();
|
||||
|
||||
let mut kem_output = Vec::with_capacity(HYBRID_KEM_OUTPUT_LEN);
|
||||
kem_output.push(HYBRID_VERSION);
|
||||
kem_output.extend_from_slice(&eph_public.to_bytes());
|
||||
kem_output.extend_from_slice(mlkem_ct.as_slice());
|
||||
|
||||
Ok((kem_output, shared_secret))
|
||||
}
|
||||
|
||||
/// Decapsulate only: recover shared secret from KEM output (no AEAD).
|
||||
/// Used by MLS HPKE exporter (setup_receiver_and_export).
|
||||
pub fn hybrid_decapsulate_only(
|
||||
keypair: &HybridKeypair,
|
||||
kem_output: &[u8],
|
||||
) -> Result<[u8; 32], HybridKemError> {
|
||||
if kem_output.len() < HYBRID_KEM_OUTPUT_LEN {
|
||||
return Err(HybridKemError::TooShort(kem_output.len()));
|
||||
}
|
||||
if kem_output[0] != HYBRID_VERSION {
|
||||
return Err(HybridKemError::UnsupportedVersion(kem_output[0]));
|
||||
}
|
||||
|
||||
let eph_pk_bytes: [u8; 32] = kem_output[1..33].try_into().unwrap();
|
||||
let eph_pk = X25519Public::from(eph_pk_bytes);
|
||||
let x25519_ss = keypair.x25519_sk.diffie_hellman(&eph_pk);
|
||||
|
||||
let mlkem_ct_arr = Array::try_from(&kem_output[33..33 + MLKEM_CT_LEN])
|
||||
.map_err(|_| HybridKemError::MlKemDecapsFailed)?;
|
||||
let mlkem_ss = keypair
|
||||
.mlkem_dk
|
||||
.decapsulate(&mlkem_ct_arr)
|
||||
.map_err(|_| HybridKemError::MlKemDecapsFailed)?;
|
||||
|
||||
let aead_key = derive_aead_key(x25519_ss.as_bytes(), mlkem_ss.as_slice());
|
||||
Ok(aead_key.as_slice().try_into().unwrap())
|
||||
}
|
||||
|
||||
/// Export a secret from shared secret (MLS HPKE exporter compatibility).
|
||||
/// Uses HKDF-Expand(prk, exporter_context, length) with prk = HKDF-Extract(0, shared_secret).
|
||||
pub fn hybrid_export(
|
||||
shared_secret: &[u8; 32],
|
||||
exporter_context: &[u8],
|
||||
length: usize,
|
||||
) -> Vec<u8> {
|
||||
let hk = Hkdf::<Sha256>::new(None, shared_secret);
|
||||
let mut out = vec![0u8; length];
|
||||
hk.expand(exporter_context, &mut out).expect("valid length");
|
||||
out
|
||||
}
|
||||
|
||||
/// Derive AEAD key from the combined X25519 + ML-KEM shared secrets.
|
||||
///
|
||||
/// The nonce is generated randomly per-encryption rather than derived from
|
||||
|
||||
@@ -3,17 +3,20 @@
|
||||
//!
|
||||
//! # Module layout
|
||||
//!
|
||||
//! | Module | Responsibility |
|
||||
//! |--------------|------------------------------------------------------------------|
|
||||
//! | `error` | [`CoreError`] type |
|
||||
//! | Module | Responsibility |
|
||||
//! |---------------|------------------------------------------------------------------|
|
||||
//! | `app_message` | Rich application payload (Chat, Reply, Reaction, ReadReceipt, Typing) |
|
||||
//! | `error` | [`CoreError`] type |
|
||||
//! | `identity` | [`IdentityKeypair`] — Ed25519 identity key for MLS credentials |
|
||||
//! | `keypackage` | [`generate_key_package`] — standalone KeyPackage generation |
|
||||
//! | `group` | [`GroupMember`] — MLS group lifecycle (create/join/send/recv) |
|
||||
//! | `hybrid_kem` | Hybrid X25519 + ML-KEM-768 key encapsulation |
|
||||
//! | `keystore` | [`DiskKeyStore`] — OpenMLS key store with optional persistence |
|
||||
|
||||
mod app_message;
|
||||
mod error;
|
||||
mod group;
|
||||
pub mod hybrid_crypto;
|
||||
pub mod hybrid_kem;
|
||||
mod identity;
|
||||
mod keypackage;
|
||||
@@ -22,12 +25,17 @@ pub mod opaque_auth;
|
||||
|
||||
// ── Public API ────────────────────────────────────────────────────────────────
|
||||
|
||||
pub use app_message::{
|
||||
serialize, serialize_chat, serialize_reaction, serialize_read_receipt, serialize_reply,
|
||||
serialize_typing, parse, generate_message_id, AppMessage, MessageType, VERSION as APP_MESSAGE_VERSION,
|
||||
};
|
||||
pub use error::CoreError;
|
||||
pub use group::GroupMember;
|
||||
pub use hybrid_kem::{
|
||||
hybrid_decrypt, hybrid_encrypt, HybridKemError, HybridKeypair, HybridKeypairBytes,
|
||||
HybridPublicKey,
|
||||
};
|
||||
pub use hybrid_crypto::{HybridCrypto, HybridCryptoProvider};
|
||||
pub use identity::IdentityKeypair;
|
||||
pub use keypackage::{generate_key_package, validate_keypackage_ciphersuite};
|
||||
pub use keystore::DiskKeyStore;
|
||||
|
||||
22
crates/quicnprotochat-gui/Cargo.toml
Normal file
22
crates/quicnprotochat-gui/Cargo.toml
Normal file
@@ -0,0 +1,22 @@
|
||||
[package]
|
||||
name = "quicnprotochat-gui"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "Native GUI for quicnprotochat (Tauri 2)."
|
||||
license = "MIT"
|
||||
|
||||
[[bin]]
|
||||
name = "quicnprotochat-gui"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
quicnprotochat-core = { path = "../quicnprotochat-core" }
|
||||
quicnprotochat-client = { path = "../quicnprotochat-client" }
|
||||
quicnprotochat-proto = { path = "../quicnprotochat-proto" }
|
||||
tauri = { version = "2", features = [] }
|
||||
tokio = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
|
||||
[build-dependencies]
|
||||
tauri-build = "2"
|
||||
32
crates/quicnprotochat-gui/README.md
Normal file
32
crates/quicnprotochat-gui/README.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# quicnprotochat-gui
|
||||
|
||||
Native GUI for quicnprotochat using [Tauri 2](https://v2.tauri.app/). The UI runs in a webview; all server-facing work (capnp-rpc, `node_service::Client`) runs on a **dedicated backend thread** with a tokio `LocalSet`, since that code is `!Send`.
|
||||
|
||||
## Backend threading model
|
||||
|
||||
- A single **backend thread** runs a tokio `LocalSet` and a request-response loop.
|
||||
- The UI thread sends commands over an `mpsc` channel: `Whoami { state_path, password }` or `Health { server, ca_cert, server_name }`.
|
||||
- For each request, the backend runs sync code (whoami) or `LocalSet::run_until(async { ... })` (health). It then sends `Result<String, String>` back on the provided reply channel.
|
||||
- Tauri commands (`whoami`, `health`) block on that reply so the frontend gets a simple async-style result.
|
||||
|
||||
## How to run
|
||||
|
||||
From the workspace root:
|
||||
|
||||
```bash
|
||||
cargo run -p quicnprotochat-gui
|
||||
```
|
||||
|
||||
**Linux:** Tauri uses GTK. Install development packages if the build fails, e.g.:
|
||||
|
||||
- Debian/Ubuntu: `sudo apt install libgtk-3-dev libwebkit2gtk-4.1-dev`
|
||||
- Fedora: `sudo dnf install gtk3-devel webkit2gtk4.1-devel`
|
||||
|
||||
## Frontend
|
||||
|
||||
The frontend is static HTML in `ui/index.html` (no npm or build step). It provides:
|
||||
|
||||
- **Whoami** – state path (and optional password); calls `whoami` and shows JSON (identity_key, fingerprint, etc.).
|
||||
- **Health** – server address; calls `health` and shows server status and RTT JSON.
|
||||
|
||||
Default CA cert and server name for health are the same as the CLI (`data/server-cert.der`, `localhost`) unless overridden via optional params.
|
||||
3
crates/quicnprotochat-gui/build.rs
Normal file
3
crates/quicnprotochat-gui/build.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
// Build script: runs the Tauri build step (generates context from
// tauri.conf.json, capabilities, icons) before compiling the crate.
fn main() {
    tauri_build::build()
}
|
||||
11
crates/quicnprotochat-gui/capabilities/default.json
Normal file
11
crates/quicnprotochat-gui/capabilities/default.json
Normal file
@@ -0,0 +1,11 @@
|
||||
{
|
||||
"$schema": "https://schema.tauri.app/config/2/capability",
|
||||
"identifier": "default",
|
||||
"description": "Capability for the main window (custom commands whoami, health are allowed by default)",
|
||||
"windows": ["main"],
|
||||
"permissions": [
|
||||
"core:default",
|
||||
"core:window:allow-close",
|
||||
"core:window:allow-set-title"
|
||||
]
|
||||
}
|
||||
1
crates/quicnprotochat-gui/gen/schemas/acl-manifests.json
Normal file
1
crates/quicnprotochat-gui/gen/schemas/acl-manifests.json
Normal file
File diff suppressed because one or more lines are too long
1
crates/quicnprotochat-gui/gen/schemas/capabilities.json
Normal file
1
crates/quicnprotochat-gui/gen/schemas/capabilities.json
Normal file
@@ -0,0 +1 @@
|
||||
{"default":{"identifier":"default","description":"Capability for the main window (custom commands whoami, health are allowed by default)","local":true,"windows":["main"],"permissions":["core:default","core:window:allow-close","core:window:allow-set-title"]}}
|
||||
2244
crates/quicnprotochat-gui/gen/schemas/desktop-schema.json
Normal file
2244
crates/quicnprotochat-gui/gen/schemas/desktop-schema.json
Normal file
File diff suppressed because it is too large
Load Diff
2244
crates/quicnprotochat-gui/gen/schemas/linux-schema.json
Normal file
2244
crates/quicnprotochat-gui/gen/schemas/linux-schema.json
Normal file
File diff suppressed because it is too large
Load Diff
BIN
crates/quicnprotochat-gui/icons/icon.png
Normal file
BIN
crates/quicnprotochat-gui/icons/icon.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 2.1 KiB |
86
crates/quicnprotochat-gui/src/backend.rs
Normal file
86
crates/quicnprotochat-gui/src/backend.rs
Normal file
@@ -0,0 +1,86 @@
|
||||
//! Backend service running on a dedicated thread with a tokio LocalSet.
|
||||
//!
|
||||
//! All server-facing work (capnp-rpc, node_service::Client) is !Send and must run on this
|
||||
//! single thread. The UI thread sends commands over a channel; this thread runs
|
||||
//! `LocalSet::run_until` for each request and sends the result back.
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::sync::mpsc;
|
||||
use std::thread;
|
||||
|
||||
use tokio::runtime::Builder;
|
||||
use tokio::task::LocalSet;
|
||||
|
||||
use quicnprotochat_client::{cmd_health_json, whoami_json};
|
||||
|
||||
/// Commands the UI can send to the backend thread.
pub enum BackendCommand {
    /// Inspect local identity state (reads the state file; no network).
    Whoami {
        // Path to the client state file, e.g. "quicnprotochat-state.bin".
        state_path: String,
        // Optional state-file decryption password.
        password: Option<String>,
    },
    /// Query the server's health over capnp-rpc (see `run_command`).
    Health {
        // Server address, e.g. "127.0.0.1:7000".
        server: String,
        // CA certificate used to trust the server's TLS cert.
        ca_cert: PathBuf,
        // TLS server name to verify against (SNI), e.g. "localhost".
        server_name: String,
    },
}
|
||||
|
||||
/// Response sent back to the UI.
|
||||
pub type BackendResponse = Result<String, String>;
|
||||
|
||||
/// Spawn the backend thread and return a sender to post commands and a join handle.
|
||||
/// The backend runs a tokio LocalSet and processes one command at a time:
|
||||
/// for each received command it runs `LocalSet::run_until(future)` (for async commands)
|
||||
/// or runs sync code (whoami), then sends the result on the provided reply channel.
|
||||
pub fn spawn_backend() -> (mpsc::Sender<(BackendCommand, mpsc::Sender<BackendResponse>)>, thread::JoinHandle<()>) {
|
||||
let (tx, rx) = mpsc::channel::<(BackendCommand, mpsc::Sender<BackendResponse>)>();
|
||||
|
||||
let handle = thread::spawn(move || {
|
||||
let rt = Builder::new_current_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
.expect("backend tokio runtime");
|
||||
let local = LocalSet::new();
|
||||
|
||||
while let Ok((cmd, reply_tx)) = rx.recv() {
|
||||
let result = run_command(&local, &rt, cmd);
|
||||
let _ = reply_tx.send(result);
|
||||
}
|
||||
});
|
||||
|
||||
(tx, handle)
|
||||
}
|
||||
|
||||
/// Dispatches one [`BackendCommand`], converting any error into a displayable
/// string for the UI (the `BackendResponse` type).
fn run_command(
    local: &LocalSet,
    rt: &tokio::runtime::Runtime,
    cmd: BackendCommand,
) -> BackendResponse {
    match cmd {
        BackendCommand::Whoami { state_path, password } => {
            // Purely local state inspection — no server round-trip, no LocalSet needed.
            let path = PathBuf::from(&state_path);
            whoami_json(&path, password.as_deref()).map_err(|e| e.to_string())
        }
        BackendCommand::Health {
            server,
            ca_cert,
            server_name,
        } => {
            // Request-response: we run LocalSet::run_until for this single request so capnp-rpc
            // and connect_node stay on this thread (!Send).
            let fut = cmd_health_json(&server, &ca_cert, &server_name);
            rt.block_on(local.run_until(fut)).map_err(|e| e.to_string())
        }
    }
}
|
||||
|
||||
/// Default CA cert path (relative to cwd or absolute); same default as CLI.
pub fn default_ca_cert() -> PathBuf {
    "data/server-cert.der".into()
}
|
||||
|
||||
/// Default TLS server name used when the caller supplies none.
pub fn default_server_name() -> String {
    String::from("localhost")
}
|
||||
76
crates/quicnprotochat-gui/src/lib.rs
Normal file
76
crates/quicnprotochat-gui/src/lib.rs
Normal file
@@ -0,0 +1,76 @@
|
||||
//! quicnprotochat native GUI (Tauri 2).
|
||||
//!
|
||||
//! The backend runs on a dedicated thread with a tokio LocalSet; all server-facing
|
||||
//! work (capnp-rpc, node_service::Client) is dispatched there. Tauri commands
|
||||
//! block on the request-response channel until the backend returns.
|
||||
|
||||
mod backend;
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::sync::mpsc;
|
||||
|
||||
use backend::{spawn_backend, BackendCommand};
|
||||
|
||||
/// Shared state: sender to the backend thread.
///
/// Managed by Tauri (`Builder::manage`); each command clones a reply channel
/// and posts `(command, reply_tx)` pairs here.
struct BackendState {
    tx: mpsc::Sender<(BackendCommand, mpsc::Sender<backend::BackendResponse>)>,
}
|
||||
|
||||
/// Runs whoami on the backend thread and returns JSON string (identity_key, fingerprint, etc.).
///
/// NOTE(review): `reply_rx.recv()` blocks the invoking thread until the backend
/// answers — presumably fine because Tauri dispatches commands off the main
/// thread; confirm this command is not forced onto the main thread.
#[tauri::command]
fn whoami(
    state: tauri::State<BackendState>,
    state_path: String,
    password: Option<String>,
) -> Result<String, String> {
    // One-shot reply channel per request; the backend sends exactly one response.
    let (reply_tx, reply_rx) = mpsc::channel();
    state
        .tx
        .send((
            BackendCommand::Whoami {
                state_path,
                password,
            },
            reply_tx,
        ))
        .map_err(|e| e.to_string())?;
    // Outer `?` surfaces a channel failure (backend thread gone); the inner
    // Result<String, String> is the command's own outcome.
    reply_rx.recv().map_err(|e| e.to_string())?
}
|
||||
|
||||
/// Runs health check on the backend thread (LocalSet::run_until) and returns status JSON.
///
/// `ca_cert` and `server_name` are optional from the UI; missing values fall
/// back to the same defaults the CLI uses.
#[tauri::command]
fn health(
    state: tauri::State<BackendState>,
    server: String,
    ca_cert: Option<String>,
    server_name: Option<String>,
) -> Result<String, String> {
    let ca_cert = ca_cert
        .map(PathBuf::from)
        .unwrap_or_else(backend::default_ca_cert);
    let server_name = server_name.unwrap_or_else(backend::default_server_name);
    // One-shot reply channel per request.
    let (reply_tx, reply_rx) = mpsc::channel();
    state
        .tx
        .send((
            BackendCommand::Health {
                server,
                ca_cert,
                server_name,
            },
            reply_tx,
        ))
        .map_err(|e| e.to_string())?;
    // Outer `?` = channel failure; inner Result is the health check outcome.
    reply_rx.recv().map_err(|e| e.to_string())?
}
|
||||
|
||||
/// Application entry point: spawns the backend worker thread, registers its
/// command sender as managed state, and starts the Tauri event loop with the
/// `whoami` / `health` command handlers.
#[cfg_attr(mobile, tauri::mobile_entry_point)]
pub fn run() {
    // The join handle is intentionally dropped: the backend thread exits when
    // the last command sender (held in managed state) is dropped at shutdown.
    let (backend_tx, _backend_handle) = spawn_backend();

    tauri::Builder::default()
        .manage(BackendState { tx: backend_tx })
        .invoke_handler(tauri::generate_handler![whoami, health])
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}
|
||||
5
crates/quicnprotochat-gui/src/main.rs
Normal file
5
crates/quicnprotochat-gui/src/main.rs
Normal file
@@ -0,0 +1,5 @@
|
||||
//! Desktop entry point for quicnprotochat-gui.
|
||||
|
||||
// Thin desktop entry point: all setup lives in the library crate's `run()`.
fn main() {
    quicnprotochat_gui::run()
}
|
||||
24
crates/quicnprotochat-gui/tauri.conf.json
Normal file
24
crates/quicnprotochat-gui/tauri.conf.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"$schema": "https://schema.tauri.app/config/2",
|
||||
"productName": "quicnprotochat-gui",
|
||||
"identifier": "chat.quicnproto.gui",
|
||||
"build": {
|
||||
"frontendDist": "./ui",
|
||||
"beforeBuildCommand": "",
|
||||
"beforeDevCommand": ""
|
||||
},
|
||||
"app": {
|
||||
"windows": [
|
||||
{
|
||||
"title": "quicnprotochat",
|
||||
"width": 640,
|
||||
"height": 480
|
||||
}
|
||||
],
|
||||
"security": {
|
||||
"csp": null
|
||||
}
|
||||
},
|
||||
"bundle": {},
|
||||
"plugins": {}
|
||||
}
|
||||
54
crates/quicnprotochat-gui/ui/index.html
Normal file
54
crates/quicnprotochat-gui/ui/index.html
Normal file
@@ -0,0 +1,54 @@
|
||||
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  <title>quicnprotochat</title>
  <style>
    body { font-family: system-ui, sans-serif; margin: 1rem; }
    button { margin: 0.25rem; padding: 0.5rem 1rem; cursor: pointer; }
    #output { white-space: pre-wrap; background: #f0f0f0; padding: 0.75rem; margin-top: 1rem; min-height: 4rem; border-radius: 4px; }
    .error { color: #c00; }
  </style>
</head>
<body>
  <h1>quicnprotochat</h1>
  <p>
    <button id="whoami">Whoami</button>
    <button id="health">Health</button>
  </p>
  <label>State path: <input id="statePath" type="text" value="quicnprotochat-state.bin" size="32" /></label>
  <br />
  <label>Server: <input id="server" type="text" value="127.0.0.1:7000" size="24" /></label>
  <div id="output">Click Whoami or Health. Results appear here.</div>

  <script>
    // Minimal vanilla-JS front end: two buttons invoke the Tauri commands
    // `whoami` and `health` and dump the JSON result (or error) into #output.
    const output = document.getElementById('output');
    const statePath = document.getElementById('statePath');
    const server = document.getElementById('server');

    // Render a result; errors get the .error class (red text).
    function show(result, isError = false) {
      output.textContent = result;
      output.className = isError ? 'error' : '';
    }

    // NOTE(review): window.__TAURI__ is only injected when the global Tauri
    // API is enabled for the app — confirm the config enables it.
    const invoke = window.__TAURI__?.core?.invoke;
    if (!invoke) {
      show('Tauri API not available (not running inside Tauri?).', true);
    } else {
      document.getElementById('whoami').addEventListener('click', function () {
        show('Running whoami…');
        invoke('whoami', { statePath: statePath.value.trim(), password: null })
          .then(function (s) { show(s); })
          .catch(function (e) { show(String(e), true); });
      });
      document.getElementById('health').addEventListener('click', function () {
        show('Running health…');
        // caCert / serverName are omitted; the Rust side falls back to its defaults.
        invoke('health', { server: server.value.trim() })
          .then(function (s) { show(s); })
          .catch(function (e) { show(String(e), true); });
      });
    }
  </script>
</body>
</html>
|
||||
@@ -161,29 +161,26 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn p2p_round_trip() {
|
||||
let alice = local_node().await;
|
||||
let bob = local_node().await;
|
||||
let sender = local_node().await;
|
||||
let receiver = local_node().await;
|
||||
|
||||
let bob_addr = bob.endpoint_addr();
|
||||
let alice_id = alice.node_id();
|
||||
let payload = b"hello from alice via P2P";
|
||||
let receiver_addr = receiver.endpoint_addr();
|
||||
let sender_id = sender.node_id();
|
||||
let payload = b"hello via P2P";
|
||||
|
||||
// Spawn Bob's receiver.
|
||||
let bob_handle = tokio::spawn(async move {
|
||||
let msg = bob.recv().await.unwrap();
|
||||
let recv_handle = tokio::spawn(async move {
|
||||
let msg = receiver.recv().await.unwrap();
|
||||
assert_eq!(msg.payload, payload.to_vec());
|
||||
assert_eq!(msg.sender, alice_id);
|
||||
assert_eq!(msg.sender, sender_id);
|
||||
});
|
||||
|
||||
// Give Bob a moment to start accepting.
|
||||
tokio::time::sleep(std::time::Duration::from_millis(200)).await;
|
||||
|
||||
alice.send(bob_addr, payload).await.unwrap();
|
||||
sender.send(receiver_addr, payload).await.unwrap();
|
||||
|
||||
// Wait for Bob to finish reading before closing.
|
||||
bob_handle.await.unwrap();
|
||||
recv_handle.await.unwrap();
|
||||
|
||||
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
|
||||
alice.close().await;
|
||||
sender.close().await;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -49,3 +49,10 @@ serde = { workspace = true }
|
||||
# CLI
|
||||
clap = { workspace = true }
|
||||
toml = { version = "0.8" }
|
||||
|
||||
# Metrics (Prometheus)
|
||||
metrics = "0.22"
|
||||
metrics-exporter-prometheus = "0.15"
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3"
|
||||
|
||||
47
crates/quicnprotochat-server/migrations/001_initial.sql
Normal file
47
crates/quicnprotochat-server/migrations/001_initial.sql
Normal file
@@ -0,0 +1,47 @@
|
||||
-- Migration 001: baseline schema.

-- KeyPackages uploaded by clients; multiple rows per identity are allowed.
CREATE TABLE IF NOT EXISTS key_packages (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    identity_key BLOB NOT NULL,
    package_data BLOB NOT NULL,
    created_at INTEGER DEFAULT (strftime('%s','now'))
);

-- Store-and-forward message queue; one row per pending delivery.
CREATE TABLE IF NOT EXISTS deliveries (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    recipient_key BLOB NOT NULL,
    -- Empty blob = the recipient's default inbox.
    channel_id BLOB NOT NULL DEFAULT X'',
    payload BLOB NOT NULL,
    created_at INTEGER DEFAULT (strftime('%s','now'))
);

-- Hybrid public key published per identity (used by the hybrid envelope).
CREATE TABLE IF NOT EXISTS hybrid_keys (
    identity_key BLOB PRIMARY KEY,
    hybrid_public_key BLOB NOT NULL
);

CREATE INDEX IF NOT EXISTS idx_kp_identity
    ON key_packages(identity_key);

-- Supports per-inbox (recipient, channel) fetches.
CREATE INDEX IF NOT EXISTS idx_del_recipient_channel
    ON deliveries(recipient_key, channel_id);

-- Singleton row (id forced to 1) holding the serialized OPAQUE server setup.
CREATE TABLE IF NOT EXISTS server_setup (
    id INTEGER PRIMARY KEY CHECK (id = 1),
    setup_data BLOB NOT NULL
);

-- OPAQUE registration records (password files), keyed by username.
CREATE TABLE IF NOT EXISTS users (
    username TEXT PRIMARY KEY,
    opaque_record BLOB NOT NULL,
    created_at INTEGER DEFAULT (strftime('%s','now'))
);

-- Binding from username to identity key (one per user).
CREATE TABLE IF NOT EXISTS user_identity_keys (
    username TEXT PRIMARY KEY,
    identity_key BLOB NOT NULL
);

-- Last-known P2P endpoint (serialized node address) per identity.
CREATE TABLE IF NOT EXISTS endpoints (
    identity_key BLOB PRIMARY KEY,
    node_addr BLOB NOT NULL,
    updated_at INTEGER DEFAULT (strftime('%s','now'))
);
|
||||
21
crates/quicnprotochat-server/migrations/002_add_seq.sql
Normal file
21
crates/quicnprotochat-server/migrations/002_add_seq.sql
Normal file
@@ -0,0 +1,21 @@
|
||||
-- Migration 002: add per-inbox delivery sequence numbers.
--
-- Adds a `seq` column to the deliveries table and a separate counter table
-- that tracks the next sequence number per (recipient_key, channel_id) inbox.
-- The counter is atomically incremented on each enqueue via an UPSERT so
-- sequence numbers are gapless even under concurrent inserts.
--
-- Requires SQLite >= 3.35 (RETURNING clause support; available on Ubuntu 22.04+).

-- Rows that predate this migration all receive seq = 0.
ALTER TABLE deliveries ADD COLUMN seq INTEGER NOT NULL DEFAULT 0;

CREATE TABLE IF NOT EXISTS delivery_seq_counters (
    recipient_key BLOB NOT NULL,
    channel_id BLOB NOT NULL,
    next_seq INTEGER NOT NULL DEFAULT 0,
    PRIMARY KEY (recipient_key, channel_id)
);

-- Index lets ORDER BY seq queries use an index scan instead of a sort.
CREATE INDEX IF NOT EXISTS idx_del_seq
    ON deliveries (recipient_key, channel_id, seq);
|
||||
221
crates/quicnprotochat-server/src/auth.rs
Normal file
221
crates/quicnprotochat-server/src/auth.rs
Normal file
@@ -0,0 +1,221 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use dashmap::DashMap;
|
||||
use quicnprotochat_proto::node_capnp::auth;
|
||||
use sha2::Digest;
|
||||
use subtle::ConstantTimeEq;
|
||||
use tokio::sync::Notify;
|
||||
|
||||
use crate::error_codes::*;
|
||||
|
||||
/// Lifetime of an issued session token (checked in `validate_auth_context`).
pub const SESSION_TTL_SECS: u64 = 24 * 60 * 60; // 24 hours
/// How long an unfinished OPAQUE login handshake may linger before expiry.
pub const PENDING_LOGIN_TTL_SECS: u64 = 300; // 5 minutes
/// Sliding-window length used by `check_rate_limit`.
pub const RATE_LIMIT_WINDOW_SECS: u64 = 60;
/// Maximum enqueues allowed per token within one window.
pub const RATE_LIMIT_MAX_ENQUEUES: u32 = 100;
|
||||
|
||||
/// Server-side authentication configuration.
#[derive(Clone, Debug)]
pub struct AuthConfig {
    pub required_token: Option<Vec<u8>>,
    /// When true, a valid bearer token (no session) is accepted and the request's identity/key is used (dev/e2e only).
    pub allow_insecure_identity_from_request: bool,
}

impl AuthConfig {
    /// Builds the config, normalizing an empty `required_token` string to
    /// `None` so `""` behaves the same as an absent token.
    pub fn new(required_token: Option<String>, allow_insecure_identity_from_request: bool) -> Self {
        let required_token = match required_token {
            Some(s) if !s.is_empty() => Some(s.into_bytes()),
            _ => None,
        };
        Self {
            required_token,
            allow_insecure_identity_from_request,
        }
    }
}
|
||||
|
||||
/// Resolved data for one issued session token.
#[derive(Clone)]
pub struct SessionInfo {
    #[allow(dead_code)]
    pub username: String,
    // Empty vec means the session is not bound to an identity key
    // (see `validate_auth_context`, which maps empty -> None).
    pub identity_key: Vec<u8>,
    #[allow(dead_code)]
    pub created_at: u64,
    // Unix seconds; sessions at/past this are lazily removed on validation.
    pub expires_at: u64,
}

/// Serialized OPAQUE ServerLogin state held between loginStart and loginFinish.
pub struct PendingLogin {
    pub state_bytes: Vec<u8>,
    // Unix seconds; entries older than PENDING_LOGIN_TTL_SECS are presumably
    // pruned elsewhere — TODO confirm.
    pub created_at: u64,
}

/// Per-token sliding-window rate-limit state (see `check_rate_limit`).
pub struct RateEntry {
    pub count: u32,
    pub window_start: u64,
}

/// Outcome of token validation: the token plus the identity it is bound to, if any.
#[derive(Clone)]
pub struct AuthContext {
    pub token: Vec<u8>,
    pub identity_key: Option<Vec<u8>>,
}
|
||||
|
||||
/// Seconds since the Unix epoch; yields 0 if the system clock is before the epoch.
pub fn current_timestamp() -> u64 {
    match std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}
|
||||
|
||||
/// Fixed-window rate limiter keyed by access token.
///
/// Each token gets at most `RATE_LIMIT_MAX_ENQUEUES` requests per
/// `RATE_LIMIT_WINDOW_SECS`; the window resets in full once it has elapsed
/// (it does not slide smoothly).
///
/// NOTE(review): entries are never evicted here, so the map grows with the
/// number of distinct tokens seen — confirm pruning happens elsewhere.
pub fn check_rate_limit(
    rate_limits: &DashMap<Vec<u8>, RateEntry>,
    token: &[u8],
) -> Result<(), capnp::Error> {
    let now = current_timestamp();
    // The entry guard keeps this token's slot locked until it drops at return.
    let mut entry = rate_limits.entry(token.to_vec()).or_insert(RateEntry {
        count: 0,
        window_start: now,
    });

    if now - entry.window_start >= RATE_LIMIT_WINDOW_SECS {
        // Window elapsed: start a fresh one, counting this request as the first.
        entry.count = 1;
        entry.window_start = now;
    } else {
        entry.count += 1;
        if entry.count > RATE_LIMIT_MAX_ENQUEUES {
            return Err(crate::error_codes::coded_error(
                E014_RATE_LIMITED,
                format!(
                    "rate limit exceeded: {} enqueues in {}s window",
                    RATE_LIMIT_MAX_ENQUEUES, RATE_LIMIT_WINDOW_SECS
                ),
            ));
        }
    }
    Ok(())
}
|
||||
|
||||
pub fn validate_auth(
|
||||
cfg: &AuthConfig,
|
||||
sessions: &DashMap<Vec<u8>, SessionInfo>,
|
||||
auth: Result<auth::Reader<'_>, capnp::Error>,
|
||||
) -> Result<(), capnp::Error> {
|
||||
validate_auth_context(cfg, sessions, auth).map(|_| ())
|
||||
}
|
||||
|
||||
/// Full token validation.
///
/// Checks the auth-block version, then accepts either (a) the statically
/// configured bearer token or (b) a live session token from `sessions`.
/// Returns the resolved [`AuthContext`] — the token plus the identity it is
/// bound to (bearer tokens carry no identity).
pub fn validate_auth_context(
    cfg: &AuthConfig,
    sessions: &DashMap<Vec<u8>, SessionInfo>,
    auth: Result<auth::Reader<'_>, capnp::Error>,
) -> Result<AuthContext, capnp::Error> {
    let auth = auth?;
    let version = auth.get_version();

    // Only auth schema version 1 is understood.
    if version != 1 {
        return Err(crate::error_codes::coded_error(
            E001_BAD_AUTH_VERSION,
            format!("unsupported auth version {} (expected 1)", version),
        ));
    }

    let token = auth
        .get_access_token()
        .map_err(|e| crate::error_codes::coded_error(E020_BAD_PARAMS, format!("auth.accessToken: {e}")))?
        .to_vec();

    if token.is_empty() {
        return Err(crate::error_codes::coded_error(
            E002_EMPTY_TOKEN,
            "auth.version=1 requires non-empty accessToken",
        ));
    }

    // Static bearer token is checked first. ct_eq gives a constant-time
    // comparison; the explicit length check only leaks the token length.
    if let Some(expected) = &cfg.required_token {
        if expected.len() == token.len() && bool::from(expected.ct_eq(&token)) {
            // Bearer tokens are not identity-bound.
            return Ok(AuthContext {
                token,
                identity_key: None,
            });
        }
    }

    // Session tokens issued at login; expired entries are removed lazily here.
    if let Some(session) = sessions.get(&token) {
        let now = current_timestamp();
        if session.expires_at > now {
            // An empty identity_key means the session is not identity-bound.
            let identity = if session.identity_key.is_empty() {
                None
            } else {
                Some(session.identity_key.clone())
            };

            return Ok(AuthContext {
                token,
                identity_key: identity,
            });
        }
        // Release the read guard before mutating the map.
        drop(session);
        sessions.remove(&token);
        return Err(crate::error_codes::coded_error(
            E017_SESSION_EXPIRED,
            "session token has expired",
        ));
    }

    Err(crate::error_codes::coded_error(E003_INVALID_TOKEN, "invalid accessToken"))
}
|
||||
|
||||
pub fn require_identity<'a>(auth_ctx: &'a AuthContext) -> Result<&'a [u8], capnp::Error> {
|
||||
match auth_ctx.identity_key.as_deref() {
|
||||
Some(ik) => Ok(ik),
|
||||
None => Err(crate::error_codes::coded_error(
|
||||
E003_INVALID_TOKEN,
|
||||
"access token is not identity-bound; login required",
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn require_identity_match(auth_ctx: &AuthContext, expected: &[u8]) -> Result<(), capnp::Error> {
|
||||
let ik = require_identity(auth_ctx)?;
|
||||
if ik != expected {
|
||||
return Err(crate::error_codes::coded_error(
|
||||
E016_IDENTITY_MISMATCH,
|
||||
"access token is bound to a different identity",
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// When the token is a valid session, require it to match `request_identity`.
|
||||
/// When the token is a bearer token (no identity) and `allow_insecure_identity_from_request` is true, accept the request identity (dev/e2e).
|
||||
pub fn require_identity_or_request(
|
||||
auth_ctx: &AuthContext,
|
||||
request_identity: &[u8],
|
||||
allow_insecure: bool,
|
||||
) -> Result<(), capnp::Error> {
|
||||
match auth_ctx.identity_key.as_deref() {
|
||||
Some(_) => require_identity_match(auth_ctx, request_identity),
|
||||
None if allow_insecure => Ok(()),
|
||||
None => Err(crate::error_codes::coded_error(
|
||||
E003_INVALID_TOKEN,
|
||||
"access token is not identity-bound; login required",
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Formats a truncated hex preview of `bytes` for log/audit output.
///
/// Fix: the previous version hex-encoded the ENTIRE input and then appended an
/// ellipsis — misleading, and it put full key material into logs, contrary to
/// the "audit events must never include raw keys" rule stated in auth_ops.
/// Only the first 8 bytes (16 hex chars) are shown now; the trailing "…"
/// indicates truncation.
pub fn fmt_hex(bytes: &[u8]) -> String {
    const PREFIX_LEN: usize = 8;
    let shown = &bytes[..bytes.len().min(PREFIX_LEN)];
    let hex: String = shown.iter().map(|b| format!("{b:02x}")).collect();
    format!("{hex}…")
}
|
||||
|
||||
/// Returns the per-recipient `Notify` used to wake long-poll waiters,
/// creating it on first use.
///
/// NOTE(review): entries are never removed here; presumably the map is pruned
/// elsewhere or allowed to grow with the set of known recipients — confirm.
pub fn waiter(waiters: &DashMap<Vec<u8>, Arc<Notify>>, recipient_key: &[u8]) -> Arc<Notify> {
    waiters
        .entry(recipient_key.to_vec())
        .or_insert_with(|| Arc::new(Notify::new()))
        .clone()
}
|
||||
|
||||
pub fn fingerprint(data: &[u8]) -> Vec<u8> {
|
||||
sha2::Sha256::digest(data).to_vec()
|
||||
}
|
||||
|
||||
/// Re-export of [`crate::error_codes::coded_error`] so auth call sites can use
/// the short path.
pub fn coded_error(code: &str, msg: impl std::fmt::Display) -> capnp::Error {
    crate::error_codes::coded_error(code, msg)
}
|
||||
187
crates/quicnprotochat-server/src/config.rs
Normal file
187
crates/quicnprotochat-server/src/config.rs
Normal file
@@ -0,0 +1,187 @@
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use anyhow::Context;
|
||||
use serde::Deserialize;
|
||||
|
||||
// Built-in defaults. `merge_config` compares CLI values against these to
// detect "flag left at its default", letting a config-file value take over.
pub const DEFAULT_LISTEN: &str = "0.0.0.0:7000";
pub const DEFAULT_DATA_DIR: &str = "data";
pub const DEFAULT_TLS_CERT: &str = "data/server-cert.der";
pub const DEFAULT_TLS_KEY: &str = "data/server-key.der";
pub const DEFAULT_STORE_BACKEND: &str = "file";
pub const DEFAULT_DB_PATH: &str = "data/quicnprotochat.db";
||||
|
||||
/// Optional on-disk TOML configuration; every field is optional and CLI
/// arguments take precedence (see `merge_config`).
#[derive(Debug, Default, Deserialize)]
pub struct FileConfig {
    // Listen address, e.g. "0.0.0.0:7000".
    pub listen: Option<String>,
    pub data_dir: Option<String>,
    pub tls_cert: Option<PathBuf>,
    pub tls_key: Option<PathBuf>,
    // Static bearer token; empty/absent means no static token.
    pub auth_token: Option<String>,
    pub allow_insecure_auth: Option<bool>,
    /// When true, enqueue does not require an identity-bound session: only a valid token is required.
    /// The server does not associate the request with a specific sender (Sealed Sender).
    #[serde(default)]
    pub sealed_sender: Option<bool>,
    // Storage backend; "file" is the default, "sql" is also recognized
    // (see validate_production_config).
    pub store_backend: Option<String>,
    pub db_path: Option<PathBuf>,
    // Database encryption key; required in production with the sql backend.
    pub db_key: Option<String>,
    /// Metrics HTTP listen address (e.g. "0.0.0.0:9090"). If set, /metrics is served there.
    pub metrics_listen: Option<String>,
    /// When true and metrics_listen is set, start the metrics server.
    #[serde(default)]
    pub metrics_enabled: Option<bool>,
}
|
||||
|
||||
/// Fully resolved configuration after merging CLI args over file values over
/// built-in defaults (produced by `merge_config`).
#[derive(Debug)]
pub struct EffectiveConfig {
    pub listen: String,
    pub data_dir: String,
    pub tls_cert: PathBuf,
    pub tls_key: PathBuf,
    // None means no static bearer token is configured.
    pub auth_token: Option<String>,
    pub allow_insecure_auth: bool,
    /// When true, enqueue does not require identity; valid token only (Sealed Sender).
    pub sealed_sender: bool,
    pub store_backend: String,
    pub db_path: PathBuf,
    pub db_key: String,
    /// If Some(addr), metrics server listens here (e.g. "0.0.0.0:9090").
    pub metrics_listen: Option<String>,
    /// Start metrics server only when true and metrics_listen is set.
    pub metrics_enabled: bool,
}
|
||||
|
||||
pub fn load_config(path: Option<&Path>) -> anyhow::Result<FileConfig> {
|
||||
let path = match path {
|
||||
Some(p) => PathBuf::from(p),
|
||||
None => PathBuf::from("quicnprotochat-server.toml"),
|
||||
};
|
||||
|
||||
if !path.exists() {
|
||||
return Ok(FileConfig::default());
|
||||
}
|
||||
|
||||
let contents =
|
||||
std::fs::read_to_string(&path).with_context(|| format!("read config file {path:?}"))?;
|
||||
let cfg: FileConfig =
|
||||
toml::from_str(&contents).with_context(|| format!("parse config file {path:?}"))?;
|
||||
Ok(cfg)
|
||||
}
|
||||
|
||||
pub fn merge_config(args: &crate::Args, file: &FileConfig) -> EffectiveConfig {
|
||||
let listen = if args.listen == DEFAULT_LISTEN {
|
||||
file.listen
|
||||
.clone()
|
||||
.unwrap_or_else(|| DEFAULT_LISTEN.to_string())
|
||||
} else {
|
||||
args.listen.clone()
|
||||
};
|
||||
|
||||
let data_dir = if args.data_dir == DEFAULT_DATA_DIR {
|
||||
file.data_dir
|
||||
.clone()
|
||||
.unwrap_or_else(|| DEFAULT_DATA_DIR.to_string())
|
||||
} else {
|
||||
args.data_dir.clone()
|
||||
};
|
||||
|
||||
let tls_cert = if args.tls_cert == PathBuf::from(DEFAULT_TLS_CERT) {
|
||||
file.tls_cert
|
||||
.clone()
|
||||
.unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_CERT))
|
||||
} else {
|
||||
args.tls_cert.clone()
|
||||
};
|
||||
|
||||
let tls_key = if args.tls_key == PathBuf::from(DEFAULT_TLS_KEY) {
|
||||
file.tls_key
|
||||
.clone()
|
||||
.unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_KEY))
|
||||
} else {
|
||||
args.tls_key.clone()
|
||||
};
|
||||
|
||||
let auth_token = if args.auth_token.is_some() {
|
||||
args.auth_token.clone()
|
||||
} else {
|
||||
file.auth_token.clone()
|
||||
};
|
||||
|
||||
let allow_insecure_auth = if args.allow_insecure_auth {
|
||||
true
|
||||
} else {
|
||||
file.allow_insecure_auth.unwrap_or(false)
|
||||
};
|
||||
|
||||
let sealed_sender = args.sealed_sender || file.sealed_sender.unwrap_or(false);
|
||||
|
||||
let store_backend = if args.store_backend == DEFAULT_STORE_BACKEND {
|
||||
file.store_backend
|
||||
.clone()
|
||||
.unwrap_or_else(|| DEFAULT_STORE_BACKEND.to_string())
|
||||
} else {
|
||||
args.store_backend.clone()
|
||||
};
|
||||
|
||||
let db_path = if args.db_path == PathBuf::from(DEFAULT_DB_PATH) {
|
||||
file.db_path
|
||||
.clone()
|
||||
.unwrap_or_else(|| PathBuf::from(DEFAULT_DB_PATH))
|
||||
} else {
|
||||
args.db_path.clone()
|
||||
};
|
||||
|
||||
let db_key = if args.db_key.is_empty() {
|
||||
file.db_key.clone().unwrap_or_else(|| args.db_key.clone())
|
||||
} else {
|
||||
args.db_key.clone()
|
||||
};
|
||||
|
||||
let metrics_listen = args
|
||||
.metrics_listen
|
||||
.clone()
|
||||
.or_else(|| file.metrics_listen.clone());
|
||||
let metrics_enabled = args
|
||||
.metrics_enabled
|
||||
.or(file.metrics_enabled)
|
||||
.unwrap_or(metrics_listen.is_some());
|
||||
|
||||
EffectiveConfig {
|
||||
listen,
|
||||
data_dir,
|
||||
tls_cert,
|
||||
tls_key,
|
||||
auth_token,
|
||||
allow_insecure_auth,
|
||||
sealed_sender,
|
||||
store_backend,
|
||||
db_path,
|
||||
db_key,
|
||||
metrics_listen,
|
||||
metrics_enabled,
|
||||
}
|
||||
}
|
||||
|
||||
/// Hard safety gate for production deployments.
///
/// Checks, in order: a non-empty auth token exists, it is not the well-known
/// dev value, the sql backend has an encryption key, and the TLS material is
/// already on disk (production never auto-generates certificates).
pub fn validate_production_config(effective: &EffectiveConfig) -> anyhow::Result<()> {
    let token = match effective.auth_token.as_deref().filter(|s| !s.is_empty()) {
        Some(t) => t,
        None => anyhow::bail!("production requires QUICNPROTOCHAT_AUTH_TOKEN (non-empty)"),
    };
    if token == "devtoken" {
        anyhow::bail!(
            "production forbids auth_token 'devtoken'; set a strong QUICNPROTOCHAT_AUTH_TOKEN"
        );
    }
    if effective.store_backend == "sql" && effective.db_key.is_empty() {
        anyhow::bail!("production with store_backend=sql requires non-empty QUICNPROTOCHAT_DB_KEY");
    }
    if !effective.tls_cert.exists() || !effective.tls_key.exists() {
        anyhow::bail!(
            "production requires existing TLS cert and key (no auto-generation); provide QUICNPROTOCHAT_TLS_CERT and QUICNPROTOCHAT_TLS_KEY"
        );
    }
    Ok(())
}
|
||||
File diff suppressed because it is too large
Load Diff
49
crates/quicnprotochat-server/src/metrics.rs
Normal file
49
crates/quicnprotochat-server/src/metrics.rs
Normal file
@@ -0,0 +1,49 @@
|
||||
//! Prometheus metrics for the server.
//!
//! All counters/histograms/gauges use the `metrics` crate and are exported
//! via metrics-exporter-prometheus on a configurable HTTP port (e.g. /metrics).
//!
//! Each helper is a thin named wrapper so call sites express intent instead
//! of repeating metric-name strings.

/// Record one enqueue (success). Call after a message is enqueued.
pub fn record_enqueue_total() {
    metrics::counter!("enqueue_total").increment(1);
}

/// Record enqueued payload size in bytes (monotonic byte counter).
pub fn record_enqueue_bytes(bytes: u64) {
    metrics::counter!("enqueue_bytes_total").increment(bytes);
}

/// Record one fetch (success). Call when fetch returns.
pub fn record_fetch_total() {
    metrics::counter!("fetch_total").increment(1);
}

/// Record one fetch_wait (success). Call when fetch_wait returns.
pub fn record_fetch_wait_total() {
    metrics::counter!("fetch_wait_total").increment(1);
}

/// Set the delivery queue depth gauge (point-in-time sample, not a counter).
/// Updated at enqueue/fetch time.
pub fn record_delivery_queue_depth(depth: usize) {
    metrics::gauge!("delivery_queue_depth").set(depth as f64);
}

/// Record one KeyPackage upload (success).
pub fn record_key_package_upload_total() {
    metrics::counter!("key_package_upload_total").increment(1);
}

/// Record successful auth login (session token issued).
pub fn record_auth_login_success_total() {
    metrics::counter!("auth_login_success_total").increment(1);
}

/// Record failed auth login attempt.
pub fn record_auth_login_failure_total() {
    metrics::counter!("auth_login_failure_total").increment(1);
}

/// Record rate limit hit (enqueue rejected).
pub fn record_rate_limit_hit_total() {
    metrics::counter!("rate_limit_hit_total").increment(1);
}
|
||||
351
crates/quicnprotochat-server/src/node_service/auth_ops.rs
Normal file
351
crates/quicnprotochat-server/src/node_service/auth_ops.rs
Normal file
@@ -0,0 +1,351 @@
|
||||
use capnp::capability::Promise;
|
||||
use opaque_ke::{
|
||||
CredentialFinalization, CredentialRequest, RegistrationRequest, RegistrationUpload,
|
||||
ServerLogin, ServerRegistration,
|
||||
};
|
||||
use quicnprotochat_core::opaque_auth::OpaqueSuite;
|
||||
use quicnprotochat_proto::node_capnp::node_service;
|
||||
|
||||
use crate::auth::{coded_error, current_timestamp, PendingLogin, SESSION_TTL_SECS};
|
||||
use crate::error_codes::*;
|
||||
use crate::metrics;
|
||||
use crate::storage::StorageError;
|
||||
|
||||
use super::NodeServiceImpl;
|
||||
|
||||
// Audit events in this module must never include secrets (no session tokens, passwords, or raw keys).
|
||||
|
||||
// Maps a storage-layer failure onto the E009 coded capnp error used by all
// handlers in this module.
fn storage_err(err: StorageError) -> capnp::Error {
    coded_error(E009_STORAGE_ERROR, err)
}
|
||||
|
||||
impl NodeServiceImpl {
|
||||
/// First half of the OPAQUE login handshake.
///
/// Deserializes the client's CredentialRequest, loads the stored registration
/// record for `username`, runs `ServerLogin::start`, stashes the serialized
/// server state in `pending_logins` (keyed by username) for the finish step,
/// and returns the serialized credential response.
///
/// NOTE(review): returning a distinct "user not registered" error lets a
/// caller enumerate usernames; OPAQUE deployments often run the protocol
/// against a dummy record instead — consider whether that matters here.
pub fn handle_opaque_login_start(
    &mut self,
    params: node_service::OpaqueLoginStartParams,
    mut results: node_service::OpaqueLoginStartResults,
) -> Promise<(), capnp::Error> {
    let p = match params.get() {
        Ok(p) => p,
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    // Text readers fail twice: pointer read (outer) and UTF-8 decode (inner);
    // a decode failure falls back to the empty string and is rejected below.
    let username = match p.get_username() {
        Ok(v) => v.to_string().unwrap_or_default().to_string(),
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    let request_bytes = match p.get_request() {
        Ok(v) => v.to_vec(),
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };

    if username.is_empty() {
        return Promise::err(coded_error(E011_USERNAME_EMPTY, "username must not be empty"));
    }

    let credential_request = match CredentialRequest::<OpaqueSuite>::deserialize(&request_bytes) {
        Ok(r) => r,
        Err(e) => {
            return Promise::err(coded_error(
                E010_OPAQUE_ERROR,
                format!("invalid credential request: {e}"),
            ))
        }
    };

    // Load the user's OPAQUE registration record (password file).
    let password_file = match self.store.get_user_record(&username) {
        Ok(Some(bytes)) => match ServerRegistration::<OpaqueSuite>::deserialize(&bytes) {
            Ok(pf) => Some(pf),
            Err(e) => {
                return Promise::err(coded_error(
                    E010_OPAQUE_ERROR,
                    format!("corrupt user record: {e}"),
                ))
            }
        },
        Ok(None) => {
            return Promise::err(coded_error(E010_OPAQUE_ERROR, "user not registered"))
        }
        Err(e) => return Promise::err(storage_err(e)),
    };

    let mut rng = rand::rngs::OsRng;
    let result = match ServerLogin::<OpaqueSuite>::start(
        &mut rng,
        &self.opaque_setup,
        password_file,
        credential_request,
        username.as_bytes(),
        Default::default(),
    ) {
        Ok(r) => r,
        Err(e) => {
            return Promise::err(coded_error(
                E010_OPAQUE_ERROR,
                format!("OPAQUE login start failed: {e}"),
            ))
        }
    };

    // Keyed by username: a second concurrent loginStart for the same user
    // replaces the earlier pending state (only the latest attempt can finish).
    let state_bytes = result.state.serialize().to_vec();
    self.pending_logins.insert(
        username.clone(),
        PendingLogin {
            state_bytes,
            created_at: current_timestamp(),
        },
    );

    let response_bytes = result.message.serialize();
    results.get().set_response(&response_bytes);

    // Audit log: username only — never tokens, passwords, or key material.
    tracing::info!(user = %username, "OPAQUE login started");
    Promise::ok(())
}
|
||||
|
||||
pub fn handle_opaque_register_start(
|
||||
&mut self,
|
||||
params: node_service::OpaqueRegisterStartParams,
|
||||
mut results: node_service::OpaqueRegisterStartResults,
|
||||
) -> Promise<(), capnp::Error> {
|
||||
let p = match params.get() {
|
||||
Ok(p) => p,
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let username = match p.get_username() {
|
||||
Ok(v) => v.to_string().unwrap_or_default().to_string(),
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let request_bytes = match p.get_request() {
|
||||
Ok(v) => v.to_vec(),
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
|
||||
if username.is_empty() {
|
||||
return Promise::err(coded_error(E011_USERNAME_EMPTY, "username must not be empty"));
|
||||
}
|
||||
|
||||
if let Ok(true) = self.store.has_user_record(&username) {
|
||||
return Promise::err(coded_error(
|
||||
E018_USER_EXISTS,
|
||||
format!("user '{}' already registered", username),
|
||||
));
|
||||
}
|
||||
|
||||
let registration_request = match RegistrationRequest::<OpaqueSuite>::deserialize(&request_bytes) {
|
||||
Ok(r) => r,
|
||||
Err(e) => {
|
||||
return Promise::err(coded_error(
|
||||
E010_OPAQUE_ERROR,
|
||||
format!("invalid registration request: {e}"),
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
let result = match ServerRegistration::<OpaqueSuite>::start(
|
||||
&self.opaque_setup,
|
||||
registration_request,
|
||||
username.as_bytes(),
|
||||
) {
|
||||
Ok(r) => r,
|
||||
Err(e) => {
|
||||
return Promise::err(coded_error(
|
||||
E010_OPAQUE_ERROR,
|
||||
format!("OPAQUE registration start failed: {e}"),
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
let response_bytes = result.message.serialize();
|
||||
results.get().set_response(&response_bytes);
|
||||
|
||||
tracing::info!(user = %username, "OPAQUE registration started");
|
||||
Promise::ok(())
|
||||
}
|
||||
|
||||
/// Second half of the OPAQUE login flow: consume the pending login state,
/// verify the client's `CredentialFinalization`, bind the (required)
/// identity key, and issue a fresh 32-byte bearer session token.
///
/// Audit events in this handler must not log secrets: no password
/// material, no session token bytes.
pub fn handle_opaque_login_finish(
    &mut self,
    params: node_service::OpaqueLoginFinishParams,
    mut results: node_service::OpaqueLoginFinishResults,
) -> Promise<(), capnp::Error> {
    let p = match params.get() {
        Ok(p) => p,
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    let username = match p.get_username() {
        Ok(v) => v.to_string().unwrap_or_default().to_string(),
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    let finalization_bytes = match p.get_finalization() {
        Ok(v) => v.to_vec(),
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    // Missing identity key decodes as empty; rejected explicitly below.
    let identity_key = p.get_identity_key().unwrap_or_default().to_vec();

    if username.is_empty() {
        return Promise::err(coded_error(E011_USERNAME_EMPTY, "username must not be empty"));
    }

    // remove() (not get) so each loginStart can be finished at most once.
    let pending = match self.pending_logins.remove(&username) {
        Some((_, pl)) => pl,
        None => {
            // Audit: login failure — do not log secrets (no token, no password).
            tracing::warn!(user = %username, "audit: auth login failure (no pending login)");
            metrics::record_auth_login_failure_total();
            return Promise::err(coded_error(E019_NO_PENDING_LOGIN, "no pending login for this username"))
        }
    };

    let server_login = match ServerLogin::<OpaqueSuite>::deserialize(&pending.state_bytes) {
        Ok(s) => s,
        Err(e) => {
            return Promise::err(coded_error(
                E010_OPAQUE_ERROR,
                format!("corrupt login state: {e}"),
            ))
        }
    };

    let finalization = match CredentialFinalization::<OpaqueSuite>::deserialize(&finalization_bytes) {
        Ok(f) => f,
        Err(e) => {
            return Promise::err(coded_error(
                E010_OPAQUE_ERROR,
                format!("invalid credential finalization: {e}"),
            ))
        }
    };

    // The OPAQUE session output is discarded (`_result`); a successful
    // finish alone proves the client knew the password.
    let _result = match server_login.finish(finalization, Default::default()) {
        Ok(r) => r,
        Err(e) => {
            tracing::warn!(user = %username, "audit: auth login failure (OPAQUE finish failed)");
            metrics::record_auth_login_failure_total();
            return Promise::err(coded_error(
                E010_OPAQUE_ERROR,
                format!("OPAQUE login finish failed (bad password?): {e}"),
            ))
        }
    };

    if identity_key.is_empty() {
        metrics::record_auth_login_failure_total();
        return Promise::err(coded_error(
            E016_IDENTITY_MISMATCH,
            "identity key required to bind session token",
        ));
    }

    // NOTE(review): a storage error here is silently ignored and login
    // proceeds without the identity check — confirm that is intended.
    // The `!=` compare is not constant-time; identity keys are presumably
    // public material, so that is likely fine — verify.
    if let Ok(Some(stored_ik)) = self.store.get_user_identity_key(&username) {
        if stored_ik != identity_key {
            tracing::warn!(user = %username, "audit: auth login failure (identity mismatch)");
            metrics::record_auth_login_failure_total();
            return Promise::err(coded_error(
                E016_IDENTITY_MISMATCH,
                "identity key does not match registered key",
            ));
        }
    }

    // 32 bytes of OS randomness as the bearer session token.
    let mut token = [0u8; 32];
    rand::RngCore::fill_bytes(&mut rand::rngs::OsRng, &mut token);
    let token_vec = token.to_vec();

    let now = current_timestamp();
    self.sessions.insert(
        token_vec.clone(),
        crate::auth::SessionInfo {
            username: username.clone(),
            identity_key,
            created_at: now,
            expires_at: now + SESSION_TTL_SECS,
        },
    );

    results.get().set_session_token(&token_vec);

    // Audit: login success — do not log session token or any secrets.
    metrics::record_auth_login_success_total();
    tracing::info!(user = %username, "audit: auth login success — session token issued");
    Promise::ok(())
}
|
||||
|
||||
pub fn handle_opaque_register_finish(
|
||||
&mut self,
|
||||
params: node_service::OpaqueRegisterFinishParams,
|
||||
mut results: node_service::OpaqueRegisterFinishResults,
|
||||
) -> Promise<(), capnp::Error> {
|
||||
let p = match params.get() {
|
||||
Ok(p) => p,
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let username = match p.get_username() {
|
||||
Ok(v) => v.to_string().unwrap_or_default().to_string(),
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let upload_bytes = match p.get_upload() {
|
||||
Ok(v) => v.to_vec(),
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let identity_key = p.get_identity_key().unwrap_or_default().to_vec();
|
||||
|
||||
if username.is_empty() {
|
||||
return Promise::err(coded_error(E011_USERNAME_EMPTY, "username must not be empty"));
|
||||
}
|
||||
|
||||
let _request = match RegistrationRequest::<OpaqueSuite>::deserialize(&upload_bytes) {
|
||||
Ok(r) => r,
|
||||
Err(e) => {
|
||||
return Promise::err(coded_error(
|
||||
E010_OPAQUE_ERROR,
|
||||
format!("invalid registration upload: {e}"),
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
match self.store.has_user_record(&username) {
|
||||
Ok(true) => {
|
||||
return Promise::err(coded_error(
|
||||
E018_USER_EXISTS,
|
||||
format!("user '{}' already registered", username),
|
||||
))
|
||||
}
|
||||
Err(e) => return Promise::err(storage_err(e)),
|
||||
_ => {}
|
||||
}
|
||||
|
||||
let upload = match RegistrationUpload::<OpaqueSuite>::deserialize(&upload_bytes) {
|
||||
Ok(u) => u,
|
||||
Err(e) => {
|
||||
return Promise::err(coded_error(
|
||||
E010_OPAQUE_ERROR,
|
||||
format!("invalid registration upload: {e}"),
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
let password_file = ServerRegistration::<OpaqueSuite>::finish(upload);
|
||||
let record_bytes = password_file.serialize().to_vec();
|
||||
|
||||
if let Err(e) = self
|
||||
.store
|
||||
.store_user_record(&username, record_bytes)
|
||||
.map_err(storage_err)
|
||||
{
|
||||
return Promise::err(e);
|
||||
}
|
||||
|
||||
if !identity_key.is_empty() {
|
||||
if let Err(e) = self
|
||||
.store
|
||||
.store_user_identity_key(&username, identity_key)
|
||||
.map_err(storage_err)
|
||||
{
|
||||
return Promise::err(e);
|
||||
}
|
||||
}
|
||||
|
||||
results.get().set_success(true);
|
||||
tracing::info!(user = %username, "OPAQUE registration complete");
|
||||
Promise::ok(())
|
||||
}
|
||||
}
|
||||
318
crates/quicnprotochat-server/src/node_service/delivery.rs
Normal file
318
crates/quicnprotochat-server/src/node_service/delivery.rs
Normal file
@@ -0,0 +1,318 @@
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use capnp::capability::Promise;
|
||||
use dashmap::DashMap;
|
||||
use quicnprotochat_proto::node_capnp::node_service;
|
||||
use tokio::sync::Notify;
|
||||
use tokio::time::timeout;
|
||||
|
||||
use crate::auth::{
|
||||
check_rate_limit, coded_error, fmt_hex, require_identity_or_request, validate_auth_context,
|
||||
};
|
||||
use crate::error_codes::*;
|
||||
use crate::metrics;
|
||||
use crate::storage::{StorageError, Store};
|
||||
|
||||
use super::{NodeServiceImpl, CURRENT_WIRE_VERSION};
|
||||
|
||||
// Audit events here must not include secrets: no payload content, no full recipient/token bytes (prefix only).
|
||||
|
||||
// Hard limit on a single enqueued ciphertext (enforced in handle_enqueue).
const MAX_PAYLOAD_BYTES: usize = 5 * 1024 * 1024; // 5 MB cap per message
// Per-inbox pending-message cap; enqueue returns E015_QUEUE_FULL beyond this.
const MAX_QUEUE_DEPTH: usize = 1000;
|
||||
|
||||
/// Map a storage-layer failure onto the wire-level E009 error code.
fn storage_err(err: StorageError) -> capnp::Error {
    coded_error(E009_STORAGE_ERROR, err)
}
|
||||
|
||||
pub fn fill_payloads_wait(
|
||||
results: &mut node_service::FetchWaitResults,
|
||||
messages: Vec<(u64, Vec<u8>)>,
|
||||
) {
|
||||
let mut list = results.get().init_payloads(messages.len() as u32);
|
||||
for (i, (seq, data)) in messages.iter().enumerate() {
|
||||
let mut entry = list.reborrow().get(i as u32);
|
||||
entry.set_seq(*seq);
|
||||
entry.set_data(data);
|
||||
}
|
||||
}
|
||||
|
||||
impl NodeServiceImpl {
|
||||
/// Accept an encrypted payload for a recipient inbox, assign it a delivery
/// sequence number, and wake any fetchWait callers on that inbox.
///
/// Validation order: params → size/version checks → rate limit → identity
/// binding (skipped in sealed-sender mode) → queue-depth limit → store.
/// Audit events must not include secrets (no payload, no full keys).
pub fn handle_enqueue(
    &mut self,
    params: node_service::EnqueueParams,
    mut results: node_service::EnqueueResults,
) -> Promise<(), capnp::Error> {
    let p = match params.get() {
        Ok(p) => p,
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    let recipient_key = match p.get_recipient_key() {
        Ok(v) => v.to_vec(),
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    let payload = match p.get_payload() {
        Ok(v) => v.to_vec(),
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    // Missing/unreadable channel id falls back to the default (empty) channel.
    let channel_id = p.get_channel_id().unwrap_or_default().to_vec();
    let version = p.get_version();
    let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
        Ok(ctx) => ctx,
        Err(e) => return Promise::err(e),
    };

    // 32-byte check also guarantees the [..4] log-prefix slice below is safe.
    if recipient_key.len() != 32 {
        return Promise::err(coded_error(
            E004_IDENTITY_KEY_LENGTH,
            format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
        ));
    }
    if payload.is_empty() {
        return Promise::err(coded_error(E005_PAYLOAD_EMPTY, "payload must not be empty"));
    }
    if payload.len() > MAX_PAYLOAD_BYTES {
        return Promise::err(coded_error(
            E006_PAYLOAD_TOO_LARGE,
            format!("payload exceeds max size ({} bytes)", MAX_PAYLOAD_BYTES),
        ));
    }
    if version != CURRENT_WIRE_VERSION {
        return Promise::err(coded_error(
            E012_WIRE_VERSION,
            format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
        ));
    }

    if let Err(e) = check_rate_limit(&self.rate_limits, &auth_ctx.token) {
        // Audit: rate limit hit — do not log token or identity.
        tracing::warn!("rate_limit_hit");
        metrics::record_rate_limit_hit_total();
        return Promise::err(e);
    }

    // When sealed_sender is true, enqueue does not require identity; valid token only.
    if !self.sealed_sender {
        if let Err(e) = require_identity_or_request(
            &auth_ctx,
            &recipient_key,
            self.auth_cfg.allow_insecure_identity_from_request,
        ) {
            return Promise::err(e);
        }
    }

    // NOTE(review): depth check and enqueue are separate store calls, so
    // concurrent enqueues can overshoot MAX_QUEUE_DEPTH slightly — confirm
    // the limit is best-effort by design.
    match self.store.queue_depth(&recipient_key, &channel_id) {
        Ok(depth) if depth >= MAX_QUEUE_DEPTH => {
            return Promise::err(coded_error(
                E015_QUEUE_FULL,
                format!("queue depth {} exceeds limit {}", depth, MAX_QUEUE_DEPTH),
            ));
        }
        Err(e) => return Promise::err(storage_err(e)),
        _ => {}
    }

    // Length captured before `payload` is moved into the store.
    let payload_len = payload.len();
    let seq = match self
        .store
        .enqueue(&recipient_key, &channel_id, payload)
        .map_err(storage_err)
    {
        Ok(seq) => seq,
        Err(e) => return Promise::err(e),
    };

    // The store-assigned sequence number is returned so senders can
    // correlate deliveries.
    results.get().set_seq(seq);

    // Metrics and audit. Audit events must not include secrets (no payload, no full keys).
    metrics::record_enqueue_total();
    metrics::record_enqueue_bytes(payload_len as u64);
    if let Ok(depth) = self.store.queue_depth(&recipient_key, &channel_id) {
        metrics::record_delivery_queue_depth(depth);
    }
    tracing::info!(
        recipient_prefix = %fmt_hex(&recipient_key[..4]),
        payload_len = payload_len,
        seq = seq,
        "audit: enqueue"
    );

    // Wake any fetchWait calls blocked on this recipient's inbox.
    crate::auth::waiter(&self.waiters, &recipient_key).notify_waiters();

    Promise::ok(())
}
|
||||
|
||||
pub fn handle_fetch(
|
||||
&mut self,
|
||||
params: node_service::FetchParams,
|
||||
mut results: node_service::FetchResults,
|
||||
) -> Promise<(), capnp::Error> {
|
||||
let recipient_key = match params.get() {
|
||||
Ok(p) => match p.get_recipient_key() {
|
||||
Ok(v) => v.to_vec(),
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
},
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let channel_id = params
|
||||
.get()
|
||||
.ok()
|
||||
.and_then(|p| p.get_channel_id().ok())
|
||||
.map(|c| c.to_vec())
|
||||
.unwrap_or_default();
|
||||
let version = params
|
||||
.get()
|
||||
.ok()
|
||||
.map(|p| p.get_version())
|
||||
.unwrap_or(CURRENT_WIRE_VERSION);
|
||||
let limit = params.get().ok().map(|p| p.get_limit()).unwrap_or(0);
|
||||
let auth_ctx = match params
|
||||
.get()
|
||||
.ok()
|
||||
.map(|p| validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()))
|
||||
.transpose()
|
||||
{
|
||||
Ok(ctx) => ctx,
|
||||
Err(e) => return Promise::err(e),
|
||||
};
|
||||
|
||||
if recipient_key.len() != 32 {
|
||||
return Promise::err(coded_error(
|
||||
E004_IDENTITY_KEY_LENGTH,
|
||||
format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
|
||||
));
|
||||
}
|
||||
if version != CURRENT_WIRE_VERSION {
|
||||
return Promise::err(coded_error(
|
||||
E012_WIRE_VERSION,
|
||||
format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
|
||||
));
|
||||
}
|
||||
|
||||
let auth_ctx = match auth_ctx {
|
||||
Some(ctx) => ctx,
|
||||
None => return Promise::err(coded_error(E003_INVALID_TOKEN, "auth required")),
|
||||
};
|
||||
|
||||
if let Err(e) = require_identity_or_request(
|
||||
&auth_ctx,
|
||||
&recipient_key,
|
||||
self.auth_cfg.allow_insecure_identity_from_request,
|
||||
) {
|
||||
return Promise::err(e);
|
||||
}
|
||||
|
||||
let messages = if limit > 0 {
|
||||
match self
|
||||
.store
|
||||
.fetch_limited(&recipient_key, &channel_id, limit as usize)
|
||||
.map_err(storage_err)
|
||||
{
|
||||
Ok(m) => m,
|
||||
Err(e) => return Promise::err(e),
|
||||
}
|
||||
} else {
|
||||
match self
|
||||
.store
|
||||
.fetch(&recipient_key, &channel_id)
|
||||
.map_err(storage_err)
|
||||
{
|
||||
Ok(m) => m,
|
||||
Err(e) => return Promise::err(e),
|
||||
}
|
||||
};
|
||||
|
||||
// Audit: fetch — do not log payload or full keys.
|
||||
metrics::record_fetch_total();
|
||||
tracing::info!(
|
||||
recipient_prefix = %fmt_hex(&recipient_key[..4]),
|
||||
count = messages.len(),
|
||||
"audit: fetch"
|
||||
);
|
||||
|
||||
let mut list = results.get().init_payloads(messages.len() as u32);
|
||||
for (i, (seq, data)) in messages.iter().enumerate() {
|
||||
let mut entry = list.reborrow().get(i as u32);
|
||||
entry.set_seq(*seq);
|
||||
entry.set_data(data);
|
||||
}
|
||||
|
||||
Promise::ok(())
|
||||
}
|
||||
|
||||
/// Long-poll variant of fetch: if the inbox is empty and `timeout_ms > 0`,
/// wait for an enqueue notification (or the timeout) and fetch again.
///
/// NOTE(review): there is a window between the first (empty) fetch and the
/// `waiter.notified()` registration in which an enqueue's `notify_waiters`
/// is lost; the call then sleeps the full timeout before the re-fetch picks
/// the message up. Delivery is still correct, only delayed — confirm the
/// added latency is acceptable or pre-register the notification.
pub fn handle_fetch_wait(
    &mut self,
    params: node_service::FetchWaitParams,
    mut results: node_service::FetchWaitResults,
) -> Promise<(), capnp::Error> {
    let p = match params.get() {
        Ok(p) => p,
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    let recipient_key = match p.get_recipient_key() {
        Ok(v) => v.to_vec(),
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    // Missing/unreadable channel id falls back to the default (empty) channel.
    let channel_id = p.get_channel_id().unwrap_or_default().to_vec();
    let version = p.get_version();
    let timeout_ms = p.get_timeout_ms();
    let limit = p.get_limit();
    let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
        Ok(ctx) => ctx,
        Err(e) => return Promise::err(e),
    };

    if recipient_key.len() != 32 {
        return Promise::err(coded_error(
            E004_IDENTITY_KEY_LENGTH,
            format!("recipientKey must be exactly 32 bytes, got {}", recipient_key.len()),
        ));
    }
    if version != CURRENT_WIRE_VERSION {
        return Promise::err(coded_error(
            E012_WIRE_VERSION,
            format!("unsupported wire version {} (expected {CURRENT_WIRE_VERSION})", version),
        ));
    }

    // Only the inbox owner (identity-bound session) may read its queue.
    if let Err(e) = require_identity_or_request(
        &auth_ctx,
        &recipient_key,
        self.auth_cfg.allow_insecure_identity_from_request,
    ) {
        return Promise::err(e);
    }

    // Clone the Arcs so the async block is 'static (does not borrow self).
    let store = Arc::clone(&self.store);
    let waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>> = self.waiters.clone();

    Promise::from_future(async move {
        // Shared fetch helper: limit == 0 means "fetch everything".
        let fetch_fn = |s: &Arc<dyn Store>, rk: &[u8], ch: &[u8], lim: u32| -> Result<Vec<(u64, Vec<u8>)>, capnp::Error> {
            if lim > 0 {
                s.fetch_limited(rk, ch, lim as usize).map_err(storage_err)
            } else {
                s.fetch(rk, ch).map_err(storage_err)
            }
        };

        let messages = fetch_fn(&store, &recipient_key, &channel_id, limit)?;

        if messages.is_empty() && timeout_ms > 0 {
            // Park on the per-recipient Notify; woken by handle_enqueue's
            // notify_waiters, or released by the timeout — either way we
            // fetch once more before returning.
            let waiter = waiters
                .entry(recipient_key.clone())
                .or_insert_with(|| Arc::new(Notify::new()))
                .clone();
            let _ = timeout(Duration::from_millis(timeout_ms), waiter.notified()).await;
            let msgs = fetch_fn(&store, &recipient_key, &channel_id, limit)?;
            fill_payloads_wait(&mut results, msgs);
            metrics::record_fetch_wait_total();
            return Ok(());
        }

        fill_payloads_wait(&mut results, messages);
        metrics::record_fetch_wait_total();
        Ok(())
    })
}
|
||||
}
|
||||
259
crates/quicnprotochat-server/src/node_service/key_ops.rs
Normal file
259
crates/quicnprotochat-server/src/node_service/key_ops.rs
Normal file
@@ -0,0 +1,259 @@
|
||||
use capnp::capability::Promise;
|
||||
use quicnprotochat_proto::node_capnp::node_service;
|
||||
|
||||
use crate::auth::{coded_error, fmt_hex, require_identity_or_request, validate_auth_context};
|
||||
use crate::error_codes::*;
|
||||
use crate::metrics;
|
||||
use crate::storage::StorageError;
|
||||
|
||||
use super::NodeServiceImpl;
|
||||
|
||||
/// Map a storage-layer failure onto the wire-level E009 error code.
fn storage_err(err: StorageError) -> capnp::Error {
    coded_error(E009_STORAGE_ERROR, err)
}
|
||||
|
||||
// Hard limit on an uploaded MLS KeyPackage (clippy: dropped redundant `1 *`).
const MAX_KEYPACKAGE_BYTES: usize = 1024 * 1024; // 1 MB cap per KeyPackage
|
||||
|
||||
impl NodeServiceImpl {
|
||||
pub fn handle_upload_key_package(
|
||||
&mut self,
|
||||
params: node_service::UploadKeyPackageParams,
|
||||
mut results: node_service::UploadKeyPackageResults,
|
||||
) -> Promise<(), capnp::Error> {
|
||||
let (auth_ctx, identity_key, package) = match params.get() {
|
||||
Ok(p) => {
|
||||
let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
|
||||
Ok(ctx) => ctx,
|
||||
Err(e) => return Promise::err(e),
|
||||
};
|
||||
let ik = match p.get_identity_key() {
|
||||
Ok(v) => v.to_vec(),
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let pkg = match p.get_package() {
|
||||
Ok(v) => v.to_vec(),
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
(auth_ctx, ik, pkg)
|
||||
}
|
||||
Err(e) => return Promise::err(e),
|
||||
};
|
||||
|
||||
if identity_key.len() != 32 {
|
||||
return Promise::err(coded_error(
|
||||
E004_IDENTITY_KEY_LENGTH,
|
||||
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
|
||||
));
|
||||
}
|
||||
if package.is_empty() {
|
||||
return Promise::err(coded_error(E007_PACKAGE_EMPTY, "package must not be empty"));
|
||||
}
|
||||
if package.len() > MAX_KEYPACKAGE_BYTES {
|
||||
return Promise::err(coded_error(
|
||||
E008_PACKAGE_TOO_LARGE,
|
||||
format!("package exceeds max size ({} bytes)", MAX_KEYPACKAGE_BYTES),
|
||||
));
|
||||
}
|
||||
|
||||
if let Err(e) = require_identity_or_request(
|
||||
&auth_ctx,
|
||||
&identity_key,
|
||||
self.auth_cfg.allow_insecure_identity_from_request,
|
||||
) {
|
||||
return Promise::err(e);
|
||||
}
|
||||
|
||||
if let Err(e) = quicnprotochat_core::validate_keypackage_ciphersuite(&package) {
|
||||
return Promise::err(coded_error(
|
||||
E021_CIPHERSUITE_NOT_ALLOWED,
|
||||
format!("KeyPackage ciphersuite not allowed: {e}"),
|
||||
));
|
||||
}
|
||||
|
||||
let fingerprint: Vec<u8> = crate::auth::fingerprint(&package);
|
||||
if let Err(e) = self
|
||||
.store
|
||||
.upload_key_package(&identity_key, package)
|
||||
.map_err(storage_err)
|
||||
{
|
||||
return Promise::err(e);
|
||||
}
|
||||
|
||||
results.get().set_fingerprint(&fingerprint);
|
||||
|
||||
metrics::record_key_package_upload_total();
|
||||
// Audit: KeyPackage upload — only fingerprint prefix, no secrets.
|
||||
tracing::info!(
|
||||
identity_prefix = %fmt_hex(&identity_key[..4]),
|
||||
fingerprint_prefix = %fmt_hex(&fingerprint[..4]),
|
||||
"audit: key_package_upload"
|
||||
);
|
||||
|
||||
Promise::ok(())
|
||||
}
|
||||
|
||||
/// Look up the stored MLS KeyPackage for `identityKey`.
///
/// Uses token-only `validate_auth` (no identity binding): any authenticated
/// client may fetch another user's KeyPackage — presumably intentional,
/// since KeyPackages are directory material; confirm. Returns an empty
/// `package` rather than an error when none is stored.
pub fn handle_fetch_key_package(
    &mut self,
    params: node_service::FetchKeyPackageParams,
    mut results: node_service::FetchKeyPackageResults,
) -> Promise<(), capnp::Error> {
    let identity_key = match params.get() {
        Ok(p) => match p.get_identity_key() {
            Ok(v) => v.to_vec(),
            Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
        },
        Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
    };
    // Token validation only; the None case (params.get() failed) was
    // already returned above, so transpose() never yields Ok(None) here.
    if let Err(e) = params
        .get()
        .ok()
        .map(|p| crate::auth::validate_auth(&self.auth_cfg, &self.sessions, p.get_auth()))
        .transpose()
    {
        return Promise::err(e);
    }

    // 32-byte check also guarantees the [..4] log-prefix slices below are safe.
    if identity_key.len() != 32 {
        return Promise::err(coded_error(
            E004_IDENTITY_KEY_LENGTH,
            format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
        ));
    }

    let package = match self
        .store
        .fetch_key_package(&identity_key)
        .map_err(storage_err)
    {
        Ok(p) => p,
        Err(e) => return Promise::err(e),
    };

    match package {
        Some(pkg) => {
            tracing::debug!(identity = %fmt_hex(&identity_key[..4]), "KeyPackage fetched");
            results.get().set_package(&pkg);
        }
        None => {
            tracing::debug!(
                identity = %fmt_hex(&identity_key[..4]),
                "no KeyPackage available for identity"
            );
            // Empty bytes signal "not found" to the client.
            results.get().set_package(&[]);
        }
    }

    Promise::ok(())
}
|
||||
|
||||
pub fn handle_upload_hybrid_key(
|
||||
&mut self,
|
||||
params: node_service::UploadHybridKeyParams,
|
||||
_results: node_service::UploadHybridKeyResults,
|
||||
) -> Promise<(), capnp::Error> {
|
||||
let p = match params.get() {
|
||||
Ok(p) => p,
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let identity_key = match p.get_identity_key() {
|
||||
Ok(v) => v.to_vec(),
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let hybrid_pk = match p.get_hybrid_public_key() {
|
||||
Ok(v) => v.to_vec(),
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
|
||||
let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
|
||||
Ok(ctx) => ctx,
|
||||
Err(e) => return Promise::err(e),
|
||||
};
|
||||
|
||||
if identity_key.len() != 32 {
|
||||
return Promise::err(coded_error(
|
||||
E004_IDENTITY_KEY_LENGTH,
|
||||
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
|
||||
));
|
||||
}
|
||||
if hybrid_pk.is_empty() {
|
||||
return Promise::err(coded_error(E013_HYBRID_KEY_EMPTY, "hybridPublicKey must not be empty"));
|
||||
}
|
||||
|
||||
if let Err(e) = require_identity_or_request(
|
||||
&auth_ctx,
|
||||
&identity_key,
|
||||
self.auth_cfg.allow_insecure_identity_from_request,
|
||||
) {
|
||||
return Promise::err(e);
|
||||
}
|
||||
|
||||
if let Err(e) = self
|
||||
.store
|
||||
.upload_hybrid_key(&identity_key, hybrid_pk)
|
||||
.map_err(storage_err)
|
||||
{
|
||||
return Promise::err(e);
|
||||
}
|
||||
|
||||
tracing::debug!(identity = %fmt_hex(&identity_key[..4]), "hybrid public key uploaded");
|
||||
|
||||
Promise::ok(())
|
||||
}
|
||||
|
||||
pub fn handle_fetch_hybrid_key(
|
||||
&mut self,
|
||||
params: node_service::FetchHybridKeyParams,
|
||||
mut results: node_service::FetchHybridKeyResults,
|
||||
) -> Promise<(), capnp::Error> {
|
||||
let p = match params.get() {
|
||||
Ok(p) => p,
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let identity_key = match p.get_identity_key() {
|
||||
Ok(v) => v.to_vec(),
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
|
||||
let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
|
||||
Ok(ctx) => ctx,
|
||||
Err(e) => return Promise::err(e),
|
||||
};
|
||||
|
||||
if identity_key.len() != 32 {
|
||||
return Promise::err(coded_error(
|
||||
E004_IDENTITY_KEY_LENGTH,
|
||||
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
|
||||
));
|
||||
}
|
||||
|
||||
if let Err(e) = require_identity_or_request(
|
||||
&auth_ctx,
|
||||
&identity_key,
|
||||
self.auth_cfg.allow_insecure_identity_from_request,
|
||||
) {
|
||||
return Promise::err(e);
|
||||
}
|
||||
|
||||
let hybrid_pk = match self
|
||||
.store
|
||||
.fetch_hybrid_key(&identity_key)
|
||||
.map_err(storage_err)
|
||||
{
|
||||
Ok(p) => p,
|
||||
Err(e) => return Promise::err(e),
|
||||
};
|
||||
|
||||
match hybrid_pk {
|
||||
Some(pk) => {
|
||||
tracing::debug!(identity = %fmt_hex(&identity_key[..4]), "hybrid key fetched");
|
||||
results.get().set_hybrid_public_key(&pk);
|
||||
}
|
||||
None => {
|
||||
tracing::debug!(identity = %fmt_hex(&identity_key[..4]), "no hybrid key for identity");
|
||||
results.get().set_hybrid_public_key(&[]);
|
||||
}
|
||||
}
|
||||
|
||||
Promise::ok(())
|
||||
}
|
||||
}
|
||||
246
crates/quicnprotochat-server/src/node_service/mod.rs
Normal file
246
crates/quicnprotochat-server/src/node_service/mod.rs
Normal file
@@ -0,0 +1,246 @@
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use capnp_rpc::RpcSystem;
|
||||
use dashmap::DashMap;
|
||||
use opaque_ke::ServerSetup;
|
||||
use quicnprotochat_core::opaque_auth::OpaqueSuite;
|
||||
use quicnprotochat_proto::node_capnp::node_service;
|
||||
use tokio::sync::Notify;
|
||||
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
|
||||
|
||||
use crate::auth::{
|
||||
current_timestamp, AuthConfig, PendingLogin, RateEntry, SessionInfo,
|
||||
PENDING_LOGIN_TTL_SECS, RATE_LIMIT_WINDOW_SECS,
|
||||
};
|
||||
use crate::storage::Store;
|
||||
|
||||
mod auth_ops;
|
||||
mod delivery;
|
||||
mod key_ops;
|
||||
mod p2p_ops;
|
||||
|
||||
impl node_service::Server for NodeServiceImpl {
|
||||
fn upload_key_package(
|
||||
&mut self,
|
||||
params: node_service::UploadKeyPackageParams,
|
||||
results: node_service::UploadKeyPackageResults,
|
||||
) -> capnp::capability::Promise<(), capnp::Error> {
|
||||
self.handle_upload_key_package(params, results)
|
||||
}
|
||||
|
||||
fn fetch_key_package(
|
||||
&mut self,
|
||||
params: node_service::FetchKeyPackageParams,
|
||||
results: node_service::FetchKeyPackageResults,
|
||||
) -> capnp::capability::Promise<(), capnp::Error> {
|
||||
self.handle_fetch_key_package(params, results)
|
||||
}
|
||||
|
||||
fn enqueue(
|
||||
&mut self,
|
||||
params: node_service::EnqueueParams,
|
||||
results: node_service::EnqueueResults,
|
||||
) -> capnp::capability::Promise<(), capnp::Error> {
|
||||
self.handle_enqueue(params, results)
|
||||
}
|
||||
|
||||
fn fetch(
|
||||
&mut self,
|
||||
params: node_service::FetchParams,
|
||||
results: node_service::FetchResults,
|
||||
) -> capnp::capability::Promise<(), capnp::Error> {
|
||||
self.handle_fetch(params, results)
|
||||
}
|
||||
|
||||
fn fetch_wait(
|
||||
&mut self,
|
||||
params: node_service::FetchWaitParams,
|
||||
results: node_service::FetchWaitResults,
|
||||
) -> capnp::capability::Promise<(), capnp::Error> {
|
||||
self.handle_fetch_wait(params, results)
|
||||
}
|
||||
|
||||
fn health(
|
||||
&mut self,
|
||||
params: node_service::HealthParams,
|
||||
results: node_service::HealthResults,
|
||||
) -> capnp::capability::Promise<(), capnp::Error> {
|
||||
self.handle_health(params, results)
|
||||
}
|
||||
|
||||
fn upload_hybrid_key(
|
||||
&mut self,
|
||||
params: node_service::UploadHybridKeyParams,
|
||||
results: node_service::UploadHybridKeyResults,
|
||||
) -> capnp::capability::Promise<(), capnp::Error> {
|
||||
self.handle_upload_hybrid_key(params, results)
|
||||
}
|
||||
|
||||
fn fetch_hybrid_key(
|
||||
&mut self,
|
||||
params: node_service::FetchHybridKeyParams,
|
||||
results: node_service::FetchHybridKeyResults,
|
||||
) -> capnp::capability::Promise<(), capnp::Error> {
|
||||
self.handle_fetch_hybrid_key(params, results)
|
||||
}
|
||||
|
||||
fn opaque_login_start(
|
||||
&mut self,
|
||||
params: node_service::OpaqueLoginStartParams,
|
||||
results: node_service::OpaqueLoginStartResults,
|
||||
) -> capnp::capability::Promise<(), capnp::Error> {
|
||||
self.handle_opaque_login_start(params, results)
|
||||
}
|
||||
|
||||
fn opaque_register_start(
|
||||
&mut self,
|
||||
params: node_service::OpaqueRegisterStartParams,
|
||||
results: node_service::OpaqueRegisterStartResults,
|
||||
) -> capnp::capability::Promise<(), capnp::Error> {
|
||||
self.handle_opaque_register_start(params, results)
|
||||
}
|
||||
|
||||
fn opaque_login_finish(
|
||||
&mut self,
|
||||
params: node_service::OpaqueLoginFinishParams,
|
||||
results: node_service::OpaqueLoginFinishResults,
|
||||
) -> capnp::capability::Promise<(), capnp::Error> {
|
||||
self.handle_opaque_login_finish(params, results)
|
||||
}
|
||||
|
||||
fn opaque_register_finish(
|
||||
&mut self,
|
||||
params: node_service::OpaqueRegisterFinishParams,
|
||||
results: node_service::OpaqueRegisterFinishResults,
|
||||
) -> capnp::capability::Promise<(), capnp::Error> {
|
||||
self.handle_opaque_register_finish(params, results)
|
||||
}
|
||||
|
||||
    /// RPC trait dispatch: forwards `publishEndpoint` to the shared handler.
    fn publish_endpoint(
        &mut self,
        params: node_service::PublishEndpointParams,
        results: node_service::PublishEndpointResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_publish_endpoint(params, results)
    }
|
||||
|
||||
    /// RPC trait dispatch: forwards `resolveEndpoint` to the shared handler.
    fn resolve_endpoint(
        &mut self,
        params: node_service::ResolveEndpointParams,
        results: node_service::ResolveEndpointResults,
    ) -> capnp::capability::Promise<(), capnp::Error> {
        self.handle_resolve_endpoint(params, results)
    }
|
||||
}
|
||||
|
||||
/// Protocol wire version this server implements.
/// NOTE(review): no usage site is visible here — confirm where this is
/// advertised/negotiated with clients before bumping.
pub const CURRENT_WIRE_VERSION: u16 = 1;
|
||||
|
||||
/// Shared server-side state behind the Cap'n Proto `NodeService` capability.
///
/// Every field is an `Arc` handle, so each per-connection RPC instance observes
/// the same stores, session tables, and rate limits.
pub struct NodeServiceImpl {
    /// Persistence backend (key packages, deliveries, users, endpoints).
    pub store: Arc<dyn Store>,
    /// Per-inbox wakeup notifiers — presumably keyed by recipient bytes for
    /// long-poll fetches; confirm key shape against delivery handlers.
    pub waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
    /// Token/session validation configuration.
    pub auth_cfg: Arc<AuthConfig>,
    /// OPAQUE server setup shared by login and registration flows.
    pub opaque_setup: Arc<ServerSetup<OpaqueSuite>>,
    /// In-flight OPAQUE login state — presumably keyed by username; verify.
    pub pending_logins: Arc<DashMap<String, PendingLogin>>,
    /// Authenticated sessions — presumably keyed by session-token bytes; verify.
    pub sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
    /// Rate-limit window entries per caller key.
    pub rate_limits: Arc<DashMap<Vec<u8>, RateEntry>>,
    /// When true, enqueue does not require identity-bound session (Sealed Sender).
    pub sealed_sender: bool,
}
|
||||
|
||||
impl NodeServiceImpl {
    /// Bundles pre-built shared-state handles into a service instance.
    /// Pure constructor: performs no validation or I/O.
    pub fn new(
        store: Arc<dyn Store>,
        waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
        auth_cfg: Arc<AuthConfig>,
        opaque_setup: Arc<ServerSetup<OpaqueSuite>>,
        pending_logins: Arc<DashMap<String, PendingLogin>>,
        sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
        rate_limits: Arc<DashMap<Vec<u8>, RateEntry>>,
        sealed_sender: bool,
    ) -> Self {
        Self {
            store,
            waiters,
            auth_cfg,
            opaque_setup,
            pending_logins,
            sessions,
            rate_limits,
            sealed_sender,
        }
    }
}
|
||||
|
||||
/// Serves one inbound QUIC connection as a Cap'n Proto two-party RPC session.
///
/// Completes the QUIC handshake, accepts exactly one bidirectional stream,
/// exposes a fresh `NodeService` capability over it, and drives the RPC system
/// until the peer disconnects or an RPC error occurs. All state arguments are
/// cloned handles to server-wide shared state (see `NodeServiceImpl`).
pub async fn handle_node_connection(
    connecting: quinn::Connecting,
    store: Arc<dyn Store>,
    waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
    auth_cfg: Arc<AuthConfig>,
    opaque_setup: Arc<ServerSetup<OpaqueSuite>>,
    pending_logins: Arc<DashMap<String, PendingLogin>>,
    sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
    rate_limits: Arc<DashMap<Vec<u8>, RateEntry>>,
    sealed_sender: bool,
) -> Result<(), anyhow::Error> {
    let connection = connecting.await?;

    tracing::info!(peer = %connection.remote_address(), "QUIC connected");

    // The whole capnp session is multiplexed over a single bi-directional stream.
    let (send, recv) = connection
        .accept_bi()
        .await
        .map_err(|e| anyhow::anyhow!("failed to accept bi stream: {e}"))?;
    // Bridge tokio's AsyncRead/AsyncWrite to the futures traits capnp-rpc expects.
    let (reader, writer) = (recv.compat(), send.compat_write());

    let network = capnp_rpc::twoparty::VatNetwork::new(
        reader,
        writer,
        capnp_rpc::rpc_twoparty_capnp::Side::Server,
        Default::default(),
    );

    let service: node_service::Client = capnp_rpc::new_client(NodeServiceImpl::new(
        store,
        waiters,
        auth_cfg,
        opaque_setup,
        pending_logins,
        sessions,
        rate_limits,
        sealed_sender,
    ));

    // Runs until the connection closes; RPC-level failures surface as the error.
    RpcSystem::new(Box::new(network), Some(service.client))
        .await
        .map_err(|e| anyhow::anyhow!("NodeService RPC error: {e}"))
}
|
||||
|
||||
const MESSAGE_TTL_SECS: u64 = 7 * 24 * 60 * 60; // 7 days
|
||||
|
||||
pub fn spawn_cleanup_task(
|
||||
sessions: Arc<DashMap<Vec<u8>, SessionInfo>>,
|
||||
pending_logins: Arc<DashMap<String, PendingLogin>>,
|
||||
rate_limits: Arc<DashMap<Vec<u8>, RateEntry>>,
|
||||
store: Arc<dyn Store>,
|
||||
) {
|
||||
tokio::spawn(async move {
|
||||
let mut interval = tokio::time::interval(Duration::from_secs(60));
|
||||
loop {
|
||||
interval.tick().await;
|
||||
let now = current_timestamp();
|
||||
|
||||
sessions.retain(|_, info| info.expires_at > now);
|
||||
pending_logins.retain(|_, pl| now - pl.created_at < PENDING_LOGIN_TTL_SECS);
|
||||
rate_limits.retain(|_, entry| now - entry.window_start < RATE_LIMIT_WINDOW_SECS * 2);
|
||||
|
||||
match store.gc_expired_messages(MESSAGE_TTL_SECS) {
|
||||
Ok(n) if n > 0 => {
|
||||
tracing::debug!(expired = n, "garbage collected expired messages")
|
||||
}
|
||||
Err(e) => tracing::warn!(error = %e, "message GC failed"),
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
118
crates/quicnprotochat-server/src/node_service/p2p_ops.rs
Normal file
118
crates/quicnprotochat-server/src/node_service/p2p_ops.rs
Normal file
@@ -0,0 +1,118 @@
|
||||
use capnp::capability::Promise;
|
||||
use quicnprotochat_proto::node_capnp::node_service;
|
||||
|
||||
use crate::auth::{
|
||||
coded_error, fmt_hex, require_identity_or_request, validate_auth, validate_auth_context,
|
||||
};
|
||||
use crate::error_codes::*;
|
||||
use crate::storage::StorageError;
|
||||
|
||||
use super::NodeServiceImpl;
|
||||
|
||||
/// Maps a storage-layer failure to the shared E009 coded capnp error so all
/// endpoint handlers report storage problems uniformly.
fn storage_err(err: StorageError) -> capnp::Error {
    coded_error(E009_STORAGE_ERROR, err)
}
|
||||
|
||||
impl NodeServiceImpl {
|
||||
pub fn handle_health(
|
||||
&mut self,
|
||||
_params: node_service::HealthParams,
|
||||
mut results: node_service::HealthResults,
|
||||
) -> Promise<(), capnp::Error> {
|
||||
results.get().set_status("ok");
|
||||
Promise::ok(())
|
||||
}
|
||||
|
||||
pub fn handle_publish_endpoint(
|
||||
&mut self,
|
||||
params: node_service::PublishEndpointParams,
|
||||
_results: node_service::PublishEndpointResults,
|
||||
) -> Promise<(), capnp::Error> {
|
||||
let p = match params.get() {
|
||||
Ok(p) => p,
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let identity_key = match p.get_identity_key() {
|
||||
Ok(v) => v.to_vec(),
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let node_addr = match p.get_node_addr() {
|
||||
Ok(v) => v.to_vec(),
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let auth_ctx = match validate_auth_context(&self.auth_cfg, &self.sessions, p.get_auth()) {
|
||||
Ok(ctx) => ctx,
|
||||
Err(e) => return Promise::err(e),
|
||||
};
|
||||
|
||||
if identity_key.len() != 32 {
|
||||
return Promise::err(coded_error(
|
||||
E004_IDENTITY_KEY_LENGTH,
|
||||
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
|
||||
));
|
||||
}
|
||||
|
||||
if let Err(e) = require_identity_or_request(
|
||||
&auth_ctx,
|
||||
&identity_key,
|
||||
self.auth_cfg.allow_insecure_identity_from_request,
|
||||
) {
|
||||
return Promise::err(e);
|
||||
}
|
||||
|
||||
if let Err(e) = self
|
||||
.store
|
||||
.publish_endpoint(&identity_key, node_addr)
|
||||
.map_err(storage_err)
|
||||
{
|
||||
return Promise::err(e);
|
||||
}
|
||||
|
||||
tracing::debug!(identity = %fmt_hex(&identity_key[..4]), "endpoint published");
|
||||
|
||||
Promise::ok(())
|
||||
}
|
||||
|
||||
pub fn handle_resolve_endpoint(
|
||||
&mut self,
|
||||
params: node_service::ResolveEndpointParams,
|
||||
mut results: node_service::ResolveEndpointResults,
|
||||
) -> Promise<(), capnp::Error> {
|
||||
let p = match params.get() {
|
||||
Ok(p) => p,
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
let identity_key = match p.get_identity_key() {
|
||||
Ok(v) => v.to_vec(),
|
||||
Err(e) => return Promise::err(coded_error(E020_BAD_PARAMS, e)),
|
||||
};
|
||||
if let Err(e) = validate_auth(&self.auth_cfg, &self.sessions, p.get_auth()) {
|
||||
return Promise::err(e);
|
||||
}
|
||||
|
||||
if identity_key.len() != 32 {
|
||||
return Promise::err(coded_error(
|
||||
E004_IDENTITY_KEY_LENGTH,
|
||||
format!("identityKey must be exactly 32 bytes, got {}", identity_key.len()),
|
||||
));
|
||||
}
|
||||
|
||||
let endpoint = match self
|
||||
.store
|
||||
.resolve_endpoint(&identity_key)
|
||||
.map_err(storage_err)
|
||||
{
|
||||
Ok(e) => e,
|
||||
Err(e) => return Promise::err(e),
|
||||
};
|
||||
|
||||
if let Some(ep) = endpoint {
|
||||
tracing::debug!(identity = %fmt_hex(&identity_key[..4]), "endpoint resolved");
|
||||
results.get().set_node_addr(&ep);
|
||||
} else {
|
||||
results.get().set_node_addr(&[]);
|
||||
}
|
||||
|
||||
Promise::ok(())
|
||||
}
|
||||
}
|
||||
@@ -7,6 +7,33 @@ use rusqlite::{params, Connection};
|
||||
|
||||
use crate::storage::{StorageError, Store};
|
||||
|
||||
/// Schema version after introducing the migration runner (existing DBs had 1).
const SCHEMA_VERSION: i32 = 3;

/// Migrations: (migration_number, SQL). Files named NNN_name.sql, applied in order when N > user_version.
/// NOTE(review): 002_add_seq.sql is deliberately paired with number 3 so that
/// pre-runner databases (user_version == 1) skip 001 but still receive the seq
/// migration — confirm this numbering convention is documented for future files.
const MIGRATIONS: &[(i32, &str)] = &[
    (1, include_str!("../migrations/001_initial.sql")),
    (3, include_str!("../migrations/002_add_seq.sql")),
];
|
||||
|
||||
/// Runs pending migrations on an open connection: applies any migration whose number is greater
|
||||
/// than the current PRAGMA user_version, then sets user_version to SCHEMA_VERSION.
|
||||
fn run_migrations(conn: &Connection) -> Result<(), StorageError> {
|
||||
let current_version: i32 = conn
|
||||
.pragma_query_value(None, "user_version", |row| row.get(0))
|
||||
.map_err(|e| StorageError::Db(format!("PRAGMA user_version failed: {e}")))?;
|
||||
|
||||
for (migration_num, sql) in MIGRATIONS {
|
||||
if *migration_num > current_version {
|
||||
conn.execute_batch(sql).map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
}
|
||||
}
|
||||
|
||||
conn.pragma_update(None, "user_version", SCHEMA_VERSION)
|
||||
.map_err(|e| StorageError::Db(format!("set user_version failed: {e}")))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// SQLCipher-encrypted storage backend.
|
||||
pub struct SqlStore {
|
||||
conn: Mutex<Connection>,
|
||||
@@ -34,66 +61,21 @@ impl SqlStore {
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
let store = Self {
|
||||
let current_version: i32 = conn
|
||||
.pragma_query_value(None, "user_version", |row| row.get(0))
|
||||
.map_err(|e| StorageError::Db(format!("PRAGMA user_version failed: {e}")))?;
|
||||
|
||||
if current_version > SCHEMA_VERSION {
|
||||
return Err(StorageError::Db(format!(
|
||||
"database schema version {current_version} is newer than supported {SCHEMA_VERSION}"
|
||||
)));
|
||||
}
|
||||
|
||||
run_migrations(&conn)?;
|
||||
|
||||
Ok(Self {
|
||||
conn: Mutex::new(conn),
|
||||
};
|
||||
store.migrate()?;
|
||||
Ok(store)
|
||||
}
|
||||
|
||||
fn migrate(&self) -> Result<(), StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
conn.execute_batch(
|
||||
"CREATE TABLE IF NOT EXISTS key_packages (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
identity_key BLOB NOT NULL,
|
||||
package_data BLOB NOT NULL,
|
||||
created_at INTEGER DEFAULT (strftime('%s','now'))
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS deliveries (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
recipient_key BLOB NOT NULL,
|
||||
channel_id BLOB NOT NULL DEFAULT X'',
|
||||
payload BLOB NOT NULL,
|
||||
created_at INTEGER DEFAULT (strftime('%s','now'))
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS hybrid_keys (
|
||||
identity_key BLOB PRIMARY KEY,
|
||||
hybrid_public_key BLOB NOT NULL
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_kp_identity
|
||||
ON key_packages(identity_key);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_del_recipient_channel
|
||||
ON deliveries(recipient_key, channel_id);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS server_setup (
|
||||
id INTEGER PRIMARY KEY CHECK (id = 1),
|
||||
setup_data BLOB NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS users (
|
||||
username TEXT PRIMARY KEY,
|
||||
opaque_record BLOB NOT NULL,
|
||||
created_at INTEGER DEFAULT (strftime('%s','now'))
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS user_identity_keys (
|
||||
username TEXT PRIMARY KEY,
|
||||
identity_key BLOB NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS endpoints (
|
||||
identity_key BLOB PRIMARY KEY,
|
||||
node_addr BLOB NOT NULL,
|
||||
updated_at INTEGER DEFAULT (strftime('%s','now'))
|
||||
);",
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -146,37 +128,53 @@ impl Store for SqlStore {
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
payload: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
) -> Result<u64, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
// Atomically get-and-increment the per-inbox sequence counter.
|
||||
// RETURNING gives us the post-update next_seq; the assigned seq is next_seq - 1.
|
||||
let seq: i64 = conn
|
||||
.query_row(
|
||||
"INSERT INTO delivery_seq_counters (recipient_key, channel_id, next_seq)
|
||||
VALUES (?1, ?2, 1)
|
||||
ON CONFLICT(recipient_key, channel_id) DO UPDATE SET next_seq = next_seq + 1
|
||||
RETURNING next_seq - 1",
|
||||
params![recipient_key, channel_id],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
conn.execute(
|
||||
"INSERT INTO deliveries (recipient_key, channel_id, payload) VALUES (?1, ?2, ?3)",
|
||||
params![recipient_key, channel_id, payload],
|
||||
"INSERT INTO deliveries (recipient_key, channel_id, seq, payload) VALUES (?1, ?2, ?3, ?4)",
|
||||
params![recipient_key, channel_id, seq, payload],
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
Ok(())
|
||||
Ok(seq as u64)
|
||||
}
|
||||
|
||||
fn fetch(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<Vec<Vec<u8>>, StorageError> {
|
||||
fn fetch(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
|
||||
let mut stmt = conn
|
||||
.prepare(
|
||||
"SELECT id, payload FROM deliveries
|
||||
"SELECT id, seq, payload FROM deliveries
|
||||
WHERE recipient_key = ?1 AND channel_id = ?2
|
||||
ORDER BY id ASC",
|
||||
ORDER BY seq ASC",
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
let rows: Vec<(i64, Vec<u8>)> = stmt
|
||||
let rows: Vec<(i64, i64, Vec<u8>)> = stmt
|
||||
.query_map(params![recipient_key, channel_id], |row| {
|
||||
Ok((row.get(0)?, row.get(1)?))
|
||||
Ok((row.get(0)?, row.get(1)?, row.get(2)?))
|
||||
})
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
if !rows.is_empty() {
|
||||
let ids: Vec<i64> = rows.iter().map(|(id, _)| *id).collect();
|
||||
let ids: Vec<i64> = rows.iter().map(|(id, _, _)| *id).collect();
|
||||
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
|
||||
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
|
||||
let params: Vec<&dyn rusqlite::types::ToSql> = ids
|
||||
@@ -187,7 +185,7 @@ impl Store for SqlStore {
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
}
|
||||
|
||||
Ok(rows.into_iter().map(|(_, payload)| payload).collect())
|
||||
Ok(rows.into_iter().map(|(_, seq, payload)| (seq as u64, payload)).collect())
|
||||
}
|
||||
|
||||
fn fetch_limited(
|
||||
@@ -195,28 +193,28 @@ impl Store for SqlStore {
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
limit: usize,
|
||||
) -> Result<Vec<Vec<u8>>, StorageError> {
|
||||
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
|
||||
let conn = self.lock_conn()?;
|
||||
|
||||
let mut stmt = conn
|
||||
.prepare(
|
||||
"SELECT id, payload FROM deliveries
|
||||
"SELECT id, seq, payload FROM deliveries
|
||||
WHERE recipient_key = ?1 AND channel_id = ?2
|
||||
ORDER BY id ASC
|
||||
ORDER BY seq ASC
|
||||
LIMIT ?3",
|
||||
)
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
let rows: Vec<(i64, Vec<u8>)> = stmt
|
||||
let rows: Vec<(i64, i64, Vec<u8>)> = stmt
|
||||
.query_map(params![recipient_key, channel_id, limit as i64], |row| {
|
||||
Ok((row.get(0)?, row.get(1)?))
|
||||
Ok((row.get(0)?, row.get(1)?, row.get(2)?))
|
||||
})
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
|
||||
if !rows.is_empty() {
|
||||
let ids: Vec<i64> = rows.iter().map(|(id, _)| *id).collect();
|
||||
let ids: Vec<i64> = rows.iter().map(|(id, _, _)| *id).collect();
|
||||
let placeholders: String = ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
|
||||
let sql = format!("DELETE FROM deliveries WHERE id IN ({placeholders})");
|
||||
let params: Vec<&dyn rusqlite::types::ToSql> = ids
|
||||
@@ -227,7 +225,7 @@ impl Store for SqlStore {
|
||||
.map_err(|e| StorageError::Db(e.to_string()))?;
|
||||
}
|
||||
|
||||
Ok(rows.into_iter().map(|(_, payload)| payload).collect())
|
||||
Ok(rows.into_iter().map(|(_, seq, payload)| (seq as u64, payload)).collect())
|
||||
}
|
||||
|
||||
fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError> {
|
||||
@@ -406,16 +404,34 @@ impl<T> OptionalExt<T> for Result<T, rusqlite::Error> {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::path::PathBuf;
|
||||
|
||||
fn open_in_memory() -> SqlStore {
|
||||
SqlStore::open(":memory:", "").unwrap()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn sets_user_version_after_migrate() {
|
||||
let dir = tempfile::tempdir().expect("tempdir");
|
||||
let db_path: PathBuf = dir.path().join("store.db");
|
||||
|
||||
{
|
||||
let store = SqlStore::open(&db_path, "").expect("open store");
|
||||
let _guard = store.lock_conn().unwrap();
|
||||
}
|
||||
|
||||
let conn = rusqlite::Connection::open(&db_path).expect("reopen db");
|
||||
let version: i32 = conn
|
||||
.pragma_query_value(None, "user_version", |row| row.get(0))
|
||||
.expect("read user_version");
|
||||
|
||||
assert_eq!(version, SCHEMA_VERSION);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn key_package_fifo() {
|
||||
let store = open_in_memory();
|
||||
let mut identity = [0u8; 32];
|
||||
identity[..31].copy_from_slice(b"alice_identity_key__32bytes_lon");
|
||||
let identity = [1u8; 32];
|
||||
|
||||
store
|
||||
.upload_key_package(&identity, b"kp1".to_vec())
|
||||
@@ -441,11 +457,13 @@ mod tests {
|
||||
let rk = [1u8; 32];
|
||||
let ch = b"channel-1";
|
||||
|
||||
store.enqueue(&rk, ch, b"msg1".to_vec()).unwrap();
|
||||
store.enqueue(&rk, ch, b"msg2".to_vec()).unwrap();
|
||||
let seq0 = store.enqueue(&rk, ch, b"msg1".to_vec()).unwrap();
|
||||
let seq1 = store.enqueue(&rk, ch, b"msg2".to_vec()).unwrap();
|
||||
assert_eq!(seq0, 0);
|
||||
assert_eq!(seq1, 1);
|
||||
|
||||
let msgs = store.fetch(&rk, ch).unwrap();
|
||||
assert_eq!(msgs, vec![b"msg1".to_vec(), b"msg2".to_vec()]);
|
||||
assert_eq!(msgs, vec![(0u64, b"msg1".to_vec()), (1u64, b"msg2".to_vec())]);
|
||||
|
||||
assert!(store.fetch(&rk, ch).unwrap().is_empty());
|
||||
}
|
||||
@@ -461,10 +479,10 @@ mod tests {
|
||||
store.enqueue(&rk, ch, b"c".to_vec()).unwrap();
|
||||
|
||||
let msgs = store.fetch_limited(&rk, ch, 2).unwrap();
|
||||
assert_eq!(msgs, vec![b"a".to_vec(), b"b".to_vec()]);
|
||||
assert_eq!(msgs, vec![(0u64, b"a".to_vec()), (1u64, b"b".to_vec())]);
|
||||
|
||||
let remaining = store.fetch(&rk, ch).unwrap();
|
||||
assert_eq!(remaining, vec![b"c".to_vec()]);
|
||||
assert_eq!(remaining, vec![(2u64, b"c".to_vec())]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -482,23 +500,23 @@ mod tests {
|
||||
#[test]
|
||||
fn has_user_record_check() {
|
||||
let store = open_in_memory();
|
||||
assert!(!store.has_user_record("alice").unwrap());
|
||||
assert!(!store.has_user_record("user1").unwrap());
|
||||
store
|
||||
.store_user_record("alice", b"record".to_vec())
|
||||
.store_user_record("user1", b"record".to_vec())
|
||||
.unwrap();
|
||||
assert!(store.has_user_record("alice").unwrap());
|
||||
assert!(!store.has_user_record("bob").unwrap());
|
||||
assert!(store.has_user_record("user1").unwrap());
|
||||
assert!(!store.has_user_record("user2").unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn user_identity_key_round_trip() {
|
||||
let store = open_in_memory();
|
||||
assert!(store.get_user_identity_key("alice").unwrap().is_none());
|
||||
assert!(store.get_user_identity_key("user1").unwrap().is_none());
|
||||
store
|
||||
.store_user_identity_key("alice", vec![1u8; 32])
|
||||
.store_user_identity_key("user1", vec![1u8; 32])
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
store.get_user_identity_key("alice").unwrap(),
|
||||
store.get_user_identity_key("user1").unwrap(),
|
||||
Some(vec![1u8; 32])
|
||||
);
|
||||
}
|
||||
@@ -522,9 +540,9 @@ mod tests {
|
||||
store.enqueue(&rk, b"ch-b", b"b1".to_vec()).unwrap();
|
||||
|
||||
let a_msgs = store.fetch(&rk, b"ch-a").unwrap();
|
||||
assert_eq!(a_msgs, vec![b"a1".to_vec()]);
|
||||
assert_eq!(a_msgs, vec![(0u64, b"a1".to_vec())]);
|
||||
|
||||
let b_msgs = store.fetch(&rk, b"ch-b").unwrap();
|
||||
assert_eq!(b_msgs, vec![b"b1".to_vec()]);
|
||||
assert_eq!(b_msgs, vec![(0u64, b"b1".to_vec())]);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,22 +32,30 @@ pub trait Store: Send + Sync {
|
||||
|
||||
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError>;
|
||||
|
||||
/// Enqueue a payload and return the monotonically increasing per-inbox sequence number
|
||||
/// assigned to this message. Clients sort by seq before MLS processing.
|
||||
fn enqueue(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
payload: Vec<u8>,
|
||||
) -> Result<(), StorageError>;
|
||||
) -> Result<u64, StorageError>;
|
||||
|
||||
fn fetch(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<Vec<Vec<u8>>, StorageError>;
|
||||
/// Fetch and drain all queued messages, returning `(seq, payload)` pairs ordered by seq.
|
||||
fn fetch(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<Vec<(u64, Vec<u8>)>, StorageError>;
|
||||
|
||||
/// Fetch up to `limit` messages without draining the entire queue (Fix 8).
|
||||
/// Returns `(seq, payload)` pairs ordered by seq.
|
||||
fn fetch_limited(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
limit: usize,
|
||||
) -> Result<Vec<Vec<u8>>, StorageError>;
|
||||
) -> Result<Vec<(u64, Vec<u8>)>, StorageError>;
|
||||
|
||||
/// Return the number of queued messages for (recipient, channel) (Fix 7).
|
||||
fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError>;
|
||||
@@ -123,6 +131,19 @@ struct QueueMapV2 {
|
||||
map: HashMap<ChannelKey, VecDeque<Vec<u8>>>,
|
||||
}
|
||||
|
||||
/// One queued delivery: a per-inbox sequence number plus the opaque payload.
#[derive(Serialize, Deserialize, Default, Clone)]
struct SeqEntry {
    // Monotonic per-(recipient, channel) sequence number assigned at enqueue.
    seq: u64,
    // Opaque message payload bytes as handed to the store.
    data: Vec<u8>,
}
|
||||
|
||||
/// V3 delivery store: each queue entry carries a monotonic per-inbox sequence number.
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
struct QueueMapV3 {
|
||||
map: HashMap<ChannelKey, VecDeque<SeqEntry>>,
|
||||
next_seq: HashMap<ChannelKey, u64>,
|
||||
}
|
||||
|
||||
/// File-backed storage for KeyPackages and delivery queues.
|
||||
///
|
||||
/// Each mutation flushes the entire map to disk. Suitable for MVP-scale loads.
|
||||
@@ -134,7 +155,7 @@ pub struct FileBackedStore {
|
||||
users_path: PathBuf,
|
||||
identity_keys_path: PathBuf,
|
||||
key_packages: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
|
||||
deliveries: Mutex<HashMap<ChannelKey, VecDeque<Vec<u8>>>>,
|
||||
deliveries: Mutex<QueueMapV3>,
|
||||
hybrid_keys: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
|
||||
users: Mutex<HashMap<String, Vec<u8>>>,
|
||||
identity_keys: Mutex<HashMap<String, Vec<u8>>>,
|
||||
@@ -155,7 +176,7 @@ impl FileBackedStore {
|
||||
let identity_keys_path = dir.join("identity_keys.bin");
|
||||
|
||||
let key_packages = Mutex::new(Self::load_kp_map(&kp_path)?);
|
||||
let deliveries = Mutex::new(Self::load_delivery_map(&ds_path)?);
|
||||
let deliveries = Mutex::new(Self::load_delivery_map_v3(&ds_path)?);
|
||||
let hybrid_keys = Mutex::new(Self::load_hybrid_keys(&hk_path)?);
|
||||
let users = Mutex::new(Self::load_users(&users_path)?);
|
||||
let identity_keys = Mutex::new(Self::load_map_string_bytes(&identity_keys_path)?);
|
||||
@@ -201,28 +222,38 @@ impl FileBackedStore {
|
||||
fs::write(path, bytes).map_err(|e| StorageError::Io(e.to_string()))
|
||||
}
|
||||
|
||||
fn load_delivery_map(
|
||||
path: &Path,
|
||||
) -> Result<HashMap<ChannelKey, VecDeque<Vec<u8>>>, StorageError> {
|
||||
/// Load deliveries as V3. Falls back to V2 format (assigns seqs starting at 0).
|
||||
fn load_delivery_map_v3(path: &Path) -> Result<QueueMapV3, StorageError> {
|
||||
if !path.exists() {
|
||||
return Ok(HashMap::new());
|
||||
return Ok(QueueMapV3::default());
|
||||
}
|
||||
let bytes = fs::read(path).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
if bytes.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
return Ok(QueueMapV3::default());
|
||||
}
|
||||
bincode::deserialize::<QueueMapV2>(&bytes)
|
||||
.map(|v| v.map)
|
||||
.map_err(|_| StorageError::Io("deliveries file: v1 format no longer supported; delete or migrate".into()))
|
||||
// Try V3 first.
|
||||
if let Ok(v3) = bincode::deserialize::<QueueMapV3>(&bytes) {
|
||||
return Ok(v3);
|
||||
}
|
||||
// Fall back to V2: assign ascending seqs starting at 0 per channel.
|
||||
let v2 = bincode::deserialize::<QueueMapV2>(&bytes)
|
||||
.map_err(|_| StorageError::Io("deliveries file: unrecognised format".into()))?;
|
||||
let mut v3 = QueueMapV3::default();
|
||||
for (key, queue) in v2.map {
|
||||
let entries: VecDeque<SeqEntry> = queue
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.map(|(i, data)| SeqEntry { seq: i as u64, data })
|
||||
.collect();
|
||||
let next = entries.len() as u64;
|
||||
v3.next_seq.insert(key.clone(), next);
|
||||
v3.map.insert(key, entries);
|
||||
}
|
||||
Ok(v3)
|
||||
}
|
||||
|
||||
fn flush_delivery_map(
|
||||
&self,
|
||||
path: &Path,
|
||||
map: &HashMap<ChannelKey, VecDeque<Vec<u8>>>,
|
||||
) -> Result<(), StorageError> {
|
||||
let payload = QueueMapV2 { map: map.clone() };
|
||||
let bytes = bincode::serialize(&payload).map_err(|_| StorageError::Serde)?;
|
||||
fn flush_delivery_map(&self, path: &Path, map: &QueueMapV3) -> Result<(), StorageError> {
|
||||
let bytes = bincode::serialize(map).map_err(|_| StorageError::Serde)?;
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
@@ -309,27 +340,35 @@ impl Store for FileBackedStore {
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
payload: Vec<u8>,
|
||||
) -> Result<(), StorageError> {
|
||||
let mut map = lock(&self.deliveries)?;
|
||||
) -> Result<u64, StorageError> {
|
||||
let mut inner = lock(&self.deliveries)?;
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
};
|
||||
map.entry(key).or_default().push_back(payload);
|
||||
self.flush_delivery_map(&self.ds_path, &*map)
|
||||
let seq = *inner.next_seq.entry(key.clone()).or_insert(0);
|
||||
*inner.next_seq.get_mut(&key).unwrap() = seq + 1;
|
||||
inner.map.entry(key).or_default().push_back(SeqEntry { seq, data: payload });
|
||||
self.flush_delivery_map(&self.ds_path, &*inner)?;
|
||||
Ok(seq)
|
||||
}
|
||||
|
||||
fn fetch(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<Vec<Vec<u8>>, StorageError> {
|
||||
let mut map = lock(&self.deliveries)?;
|
||||
fn fetch(
|
||||
&self,
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
|
||||
let mut inner = lock(&self.deliveries)?;
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
};
|
||||
let messages = map
|
||||
let messages: Vec<(u64, Vec<u8>)> = inner
|
||||
.map
|
||||
.get_mut(&key)
|
||||
.map(|q| q.drain(..).collect())
|
||||
.map(|q| q.drain(..).map(|e| (e.seq, e.data)).collect())
|
||||
.unwrap_or_default();
|
||||
self.flush_delivery_map(&self.ds_path, &*map)?;
|
||||
self.flush_delivery_map(&self.ds_path, &*inner)?;
|
||||
Ok(messages)
|
||||
}
|
||||
|
||||
@@ -338,30 +377,31 @@ impl Store for FileBackedStore {
|
||||
recipient_key: &[u8],
|
||||
channel_id: &[u8],
|
||||
limit: usize,
|
||||
) -> Result<Vec<Vec<u8>>, StorageError> {
|
||||
let mut map = lock(&self.deliveries)?;
|
||||
) -> Result<Vec<(u64, Vec<u8>)>, StorageError> {
|
||||
let mut inner = lock(&self.deliveries)?;
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
};
|
||||
let messages = map
|
||||
let messages: Vec<(u64, Vec<u8>)> = inner
|
||||
.map
|
||||
.get_mut(&key)
|
||||
.map(|q| {
|
||||
let count = limit.min(q.len());
|
||||
q.drain(..count).collect()
|
||||
q.drain(..count).map(|e| (e.seq, e.data)).collect()
|
||||
})
|
||||
.unwrap_or_default();
|
||||
self.flush_delivery_map(&self.ds_path, &*map)?;
|
||||
self.flush_delivery_map(&self.ds_path, &*inner)?;
|
||||
Ok(messages)
|
||||
}
|
||||
|
||||
fn queue_depth(&self, recipient_key: &[u8], channel_id: &[u8]) -> Result<usize, StorageError> {
|
||||
let map = lock(&self.deliveries)?;
|
||||
let inner = lock(&self.deliveries)?;
|
||||
let key = ChannelKey {
|
||||
channel_id: channel_id.to_vec(),
|
||||
recipient_key: recipient_key.to_vec(),
|
||||
};
|
||||
Ok(map.get(&key).map(|q| q.len()).unwrap_or(0))
|
||||
Ok(inner.map.get(&key).map(|q| q.len()).unwrap_or(0))
|
||||
}
|
||||
|
||||
fn gc_expired_messages(&self, _max_age_secs: u64) -> Result<usize, StorageError> {
|
||||
|
||||
72
crates/quicnprotochat-server/src/tls.rs
Normal file
72
crates/quicnprotochat-server/src/tls.rs
Normal file
@@ -0,0 +1,72 @@
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use anyhow::Context;
|
||||
use quinn::ServerConfig;
|
||||
use quinn_proto::crypto::rustls::QuicServerConfig;
|
||||
use rcgen::generate_simple_self_signed;
|
||||
use rustls::pki_types::{CertificateDer, PrivateKeyDer};
|
||||
use rustls::version::TLS13;
|
||||
|
||||
/// Ensure a self-signed certificate exists on disk and return a QUIC server config.
/// When `production` is true, cert and key must already exist (no auto-generation).
///
/// The cert and key files are read as raw DER bytes (no PEM parsing) — TODO
/// confirm everything that writes these paths emits DER.
pub fn build_server_config(
    cert_path: &PathBuf,
    key_path: &PathBuf,
    production: bool,
) -> anyhow::Result<ServerConfig> {
    // Dev convenience: auto-generate a localhost certificate when either file is missing.
    if !cert_path.exists() || !key_path.exists() {
        if production {
            anyhow::bail!(
                "TLS cert or key missing at {:?} / {:?}; production mode forbids auto-generation",
                cert_path,
                key_path
            );
        }
        generate_self_signed_cert(cert_path, key_path)?;
    }

    let cert_bytes = std::fs::read(cert_path).context("read cert")?;
    let key_bytes = std::fs::read(key_path).context("read key")?;

    let cert_chain = vec![CertificateDer::from(cert_bytes)];
    // PrivateKeyDer::try_from detects PKCS#8 / PKCS#1 / SEC1 DER encodings.
    let key = PrivateKeyDer::try_from(key_bytes).map_err(|_| anyhow::anyhow!("invalid key"))?;

    // TLS 1.3 only; no client certificates (authentication happens at the RPC layer).
    let mut tls = rustls::ServerConfig::builder_with_protocol_versions(&[&TLS13])
        .with_no_client_auth()
        .with_single_cert(cert_chain, key)?;
    tls.alpn_protocols = vec![b"capnp".to_vec()];

    let crypto = QuicServerConfig::try_from(tls)
        .map_err(|e| anyhow::anyhow!("invalid server TLS config: {e}"))?;

    Ok(ServerConfig::with_crypto(std::sync::Arc::new(crypto)))
}
|
||||
|
||||
fn generate_self_signed_cert(cert_path: &PathBuf, key_path: &PathBuf) -> anyhow::Result<()> {
|
||||
if let Some(parent) = cert_path.parent() {
|
||||
std::fs::create_dir_all(parent).context("create cert dir")?;
|
||||
}
|
||||
if let Some(parent) = key_path.parent() {
|
||||
std::fs::create_dir_all(parent).context("create key dir")?;
|
||||
}
|
||||
|
||||
let subject_alt_names = vec![
|
||||
"localhost".to_string(),
|
||||
"127.0.0.1".to_string(),
|
||||
"::1".to_string(),
|
||||
];
|
||||
|
||||
let issued = generate_simple_self_signed(subject_alt_names)?;
|
||||
let key_der = issued.key_pair.serialize_der();
|
||||
|
||||
std::fs::write(cert_path, issued.cert.der()).context("write cert")?;
|
||||
std::fs::write(key_path, &key_der).context("write key")?;
|
||||
|
||||
tracing::info!(
|
||||
cert = %cert_path.display(),
|
||||
key = %key_path.display(),
|
||||
"generated self-signed TLS certificate"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
Reference in New Issue
Block a user