feat: Sprint 1 — production hardening, TLS lifecycle, CI coverage, lint cleanup

- Fix 3 client panics: replace .unwrap()/.expect() with proper error
  handling in rpc.rs (AUTH_CONTEXT lock), repl.rs (pending_member),
  and retry.rs (last_err)
- Add --danger-accept-invalid-certs flag with InsecureServerCertVerifier
  for development TLS bypass, plus mdBook TLS documentation
- Add CI coverage job (cargo-tarpaulin) and Docker build validation
  to GitHub Actions workflow, plus README CI badge
- Add [workspace.lints] config, fix 46 clippy warnings across 8 crates,
  zero warnings on all buildable crates
- Update Dockerfile for all 11 workspace members
This commit is contained in:
2026-03-03 23:19:11 +01:00
parent dc4e4e49a0
commit 612b06aa8e
33 changed files with 388 additions and 67 deletions

View File

@@ -72,3 +72,54 @@ jobs:
run: |
cargo install cargo-audit --locked
cargo audit
coverage:
name: Coverage
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- name: Install capnp
run: sudo apt-get update && sudo apt-get install -y capnproto
- name: Cache cargo
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-coverage-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-coverage-
- name: Install cargo-tarpaulin
run: cargo install cargo-tarpaulin
- name: Run coverage
run: |
cargo tarpaulin --workspace \
--exclude quicproquo-gui \
--exclude quicproquo-mobile \
--exclude quicproquo-p2p \
--out xml \
--output-dir coverage/ \
-- --test-threads 1
- name: Upload coverage report
uses: actions/upload-artifact@v4
with:
name: coverage-report
path: coverage/cobertura.xml
docker:
name: Docker Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Build Docker image
run: docker build -f docker/Dockerfile .

View File

@@ -80,6 +80,12 @@ clap = { version = "4", features = ["derive", "env"] }
# ── Build-time ────────────────────────────────────────────────────────────────
capnpc = { version = "0.19" }
[workspace.lints.rust]
unsafe_code = "warn"
[workspace.lints.clippy]
unwrap_used = "warn"
[profile.release]
opt-level = 3
lto = "thin"

View File

@@ -4,6 +4,8 @@
# QPQ — quicproquo # QPQ — quicproquo
[![CI](https://github.com/nickvidal/quicproquo/actions/workflows/ci.yml/badge.svg)](https://github.com/nickvidal/quicproquo/actions/workflows/ci.yml)
> End-to-end encrypted messaging over **QUIC + TLS 1.3 + MLS** (RFC 9420), written in Rust. > End-to-end encrypted messaging over **QUIC + TLS 1.3 + MLS** (RFC 9420), written in Rust.
The server never sees plaintext. Every byte on the wire is protected by a QUIC The server never sees plaintext. Every byte on the wire is protected by a QUIC

View File

@@ -5,6 +5,9 @@ edition = "2021"
description = "Bot SDK for quicproquo — build automated agents on E2E encrypted messaging." description = "Bot SDK for quicproquo — build automated agents on E2E encrypted messaging."
license = "MIT" license = "MIT"
[lints]
workspace = true
[dependencies] [dependencies]
quicproquo-core = { path = "../quicproquo-core" } quicproquo-core = { path = "../quicproquo-core" }
quicproquo-proto = { path = "../quicproquo-proto" } quicproquo-proto = { path = "../quicproquo-proto" }

View File

@@ -71,6 +71,9 @@ quicproquo-p2p = { path = "../quicproquo-p2p", optional = true }
ratatui = { version = "0.29", optional = true, default-features = false, features = ["crossterm"] } ratatui = { version = "0.29", optional = true, default-features = false, features = ["crossterm"] }
crossterm = { version = "0.28", optional = true } crossterm = { version = "0.28", optional = true }
[lints]
workspace = true
[features] [features]
# Enable mesh-mode features: mDNS local peer discovery + P2P transport. # Enable mesh-mode features: mDNS local peer discovery + P2P transport.
# Build: cargo build -p quicproquo-client --features mesh # Build: cargo build -p quicproquo-client --features mesh

View File

@@ -27,8 +27,8 @@ pub fn cmd_whoami(state_path: &Path, password: Option<&str>) -> anyhow::Result<(
let pk_bytes = identity.public_key_bytes(); let pk_bytes = identity.public_key_bytes();
let fingerprint = sha256(&pk_bytes); let fingerprint = sha256(&pk_bytes);
println!("identity_key : {}", hex::encode(&pk_bytes)); println!("identity_key : {}", hex::encode(pk_bytes));
println!("fingerprint : {}", hex::encode(&fingerprint)); println!("fingerprint : {}", hex::encode(fingerprint));
println!( println!(
"hybrid_key : {}", "hybrid_key : {}",
if state.hybrid_key.is_some() { if state.hybrid_key.is_some() {
@@ -203,6 +203,7 @@ pub async fn cmd_register_user(
} }
/// Log in via the OPAQUE protocol and receive a session token. /// Log in via the OPAQUE protocol and receive a session token.
#[allow(clippy::too_many_arguments)]
pub async fn cmd_login( pub async fn cmd_login(
server: &str, server: &str,
ca_cert: &Path, ca_cert: &Path,
@@ -522,7 +523,7 @@ async fn do_upload_keypackage(
anyhow::ensure!(server_fp == fingerprint, "fingerprint mismatch"); anyhow::ensure!(server_fp == fingerprint, "fingerprint mismatch");
if let Some(ref hkp) = hybrid_kp { if let Some(hkp) = &hybrid_kp {
upload_hybrid_key( upload_hybrid_key(
&node_client, &node_client,
&member.identity().public_key_bytes(), &member.identity().public_key_bytes(),
@@ -914,6 +915,7 @@ pub async fn cmd_join(
} }
/// Send an application message via DS (single recipient or broadcast to all other members). /// Send an application message via DS (single recipient or broadcast to all other members).
#[allow(clippy::too_many_arguments)]
pub async fn cmd_send( pub async fn cmd_send(
state_path: &Path, state_path: &Path,
server: &str, server: &str,
@@ -1115,8 +1117,8 @@ pub fn whoami_json(state_path: &Path, password: Option<&str>) -> anyhow::Result<
let fingerprint = sha256(&pk_bytes); let fingerprint = sha256(&pk_bytes);
Ok(format!( Ok(format!(
r#"{{"identity_key":"{}", "fingerprint":"{}", "hybrid_key":{}, "group":{}}}"#, r#"{{"identity_key":"{}", "fingerprint":"{}", "hybrid_key":{}, "group":{}}}"#,
hex::encode(&pk_bytes), hex::encode(pk_bytes),
hex::encode(&fingerprint), hex::encode(fingerprint),
state.hybrid_key.is_some(), state.hybrid_key.is_some(),
state.group.is_some(), state.group.is_some(),
)) ))

View File

@@ -169,7 +169,7 @@ impl ConversationStore {
let salt = get_or_create_salt(&salt_path)?; let salt = get_or_create_salt(&salt_path)?;
let key = derive_convdb_key(password, &salt)?; let key = derive_convdb_key(password, &salt)?;
let hex_key = hex::encode(&*key); let hex_key = hex::encode(*key);
let conn = Connection::open(db_path).context("open conversation db")?; let conn = Connection::open(db_path).context("open conversation db")?;
conn.pragma_update(None, "key", format!("x'{hex_key}'")) conn.pragma_update(None, "key", format!("x'{hex_key}'"))
@@ -188,7 +188,7 @@ impl ConversationStore {
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
let salt = get_or_create_salt(salt_path)?; let salt = get_or_create_salt(salt_path)?;
let key = derive_convdb_key(password, &salt)?; let key = derive_convdb_key(password, &salt)?;
let hex_key = hex::encode(&*key); let hex_key = hex::encode(*key);
let enc_path = db_path.with_extension("convdb-enc"); let enc_path = db_path.with_extension("convdb-enc");

View File

@@ -284,6 +284,7 @@ async fn ensure_server(
// ── REPL entry point ───────────────────────────────────────────────────────── // ── REPL entry point ─────────────────────────────────────────────────────────
#[allow(clippy::too_many_arguments)]
pub async fn run_repl( pub async fn run_repl(
state_path: &Path, state_path: &Path,
server: &str, server: &str,
@@ -497,6 +498,7 @@ async fn auto_upload_keys(
} }
/// Determine the access token, performing OPAQUE registration/login as needed. /// Determine the access token, performing OPAQUE registration/login as needed.
#[allow(clippy::too_many_arguments)]
async fn resolve_access_token( async fn resolve_access_token(
state_path: &Path, state_path: &Path,
server: &str, server: &str,
@@ -715,13 +717,11 @@ fn cmd_list(session: &SessionState) -> anyhow::Result<()> {
fn cmd_switch(session: &mut SessionState, target: &str) -> anyhow::Result<()> { fn cmd_switch(session: &mut SessionState, target: &str) -> anyhow::Result<()> {
let target = target.trim(); let target = target.trim();
let conv = if target.starts_with('@') { let conv = if let Some(username) = target.strip_prefix('@') {
let username = &target[1..];
session.conv_store.list_conversations()?.into_iter().find(|c| { session.conv_store.list_conversations()?.into_iter().find(|c| {
matches!(&c.kind, ConversationKind::Dm { peer_username: Some(u), .. } if u == username) matches!(&c.kind, ConversationKind::Dm { peer_username: Some(u), .. } if u == username)
}) })
} else if target.starts_with('#') { } else if let Some(name) = target.strip_prefix('#') {
let name = &target[1..];
session.conv_store.find_group_by_name(name)? session.conv_store.find_group_by_name(name)?
} else { } else {
// Try as display name // Try as display name
@@ -861,7 +861,7 @@ async fn cmd_dm(
display_name: format!("@{username}"), display_name: format!("@{username}"),
mls_group_blob: member mls_group_blob: member
.group_ref() .group_ref()
.map(|g| bincode::serialize(g)) .map(bincode::serialize)
.transpose() .transpose()
.context("serialize group")?, .context("serialize group")?,
keystore_blob: None, keystore_blob: None,
@@ -905,7 +905,7 @@ fn cmd_create_group(session: &mut SessionState, name: &str) -> anyhow::Result<()
display_name: format!("#{name}"), display_name: format!("#{name}"),
mls_group_blob: member mls_group_blob: member
.group_ref() .group_ref()
.map(|g| bincode::serialize(g)) .map(bincode::serialize)
.transpose() .transpose()
.context("serialize group")?, .context("serialize group")?,
keystore_blob: None, keystore_blob: None,
@@ -1099,7 +1099,7 @@ async fn cmd_join(
// Try to process with existing groups first // Try to process with existing groups first
let mut handled = false; let mut handled = false;
for (_cid, member) in &mut session.members { for member in session.members.values_mut() {
match member.receive_message(&mls_payload) { match member.receive_message(&mls_payload) {
Ok(_) => { handled = true; break; } Ok(_) => { handled = true; break; }
Err(_) => continue, Err(_) => continue,
@@ -1147,7 +1147,7 @@ async fn cmd_join(
display_name: format!("#{display}"), display_name: format!("#{display}"),
mls_group_blob: new_member mls_group_blob: new_member
.group_ref() .group_ref()
.map(|g| bincode::serialize(g)) .map(bincode::serialize)
.transpose() .transpose()
.context("serialize joined group")?, .context("serialize joined group")?,
keystore_blob: None, keystore_blob: None,
@@ -1570,7 +1570,13 @@ async fn try_auto_join(
} }
// Take ownership of the pending member. // Take ownership of the pending member.
let member = session.pending_member.take().unwrap(); let member = match session.pending_member.take() {
Some(m) => m,
None => {
tracing::error!("pending_member disappeared after successful join");
return false;
}
};
let member_keys = member.member_identities(); let member_keys = member.member_identities();
// Figure out the peer (any member that isn't us). // Figure out the peer (any member that isn't us).

View File

@@ -25,15 +25,13 @@ where
Fut: Future<Output = Result<T, E>>, Fut: Future<Output = Result<T, E>>,
P: Fn(&E) -> bool, P: Fn(&E) -> bool,
{ {
let mut last_err = None; let mut last_err: Option<E> = None;
for attempt in 0..max_retries { for attempt in 0..max_retries {
match op().await { match op().await {
Ok(t) => return Ok(t), Ok(t) => return Ok(t),
Err(e) => { Err(e) => {
last_err = Some(e); if !is_retriable(&e) || attempt + 1 >= max_retries {
let err = last_err.as_ref().unwrap(); return Err(e);
if !is_retriable(err) || attempt + 1 >= max_retries {
break;
} }
let delay_ms = base_delay_ms * 2u64.saturating_pow(attempt); let delay_ms = base_delay_ms * 2u64.saturating_pow(attempt);
let jitter_ms = rand::thread_rng().gen_range(0..=delay_ms / 2); let jitter_ms = rand::thread_rng().gen_range(0..=delay_ms / 2);
@@ -44,6 +42,7 @@ where
delay_ms = total_ms, delay_ms = total_ms,
"RPC failed, retrying after backoff" "RPC failed, retrying after backoff"
); );
last_err = Some(e);
tokio::time::sleep(Duration::from_millis(total_ms)).await; tokio::time::sleep(Duration::from_millis(total_ms)).await;
} }
} }

View File

@@ -5,7 +5,7 @@ use std::sync::Arc;
use anyhow::Context; use anyhow::Context;
use quinn::{ClientConfig, Endpoint}; use quinn::{ClientConfig, Endpoint};
use quinn_proto::crypto::rustls::QuicClientConfig; use quinn_proto::crypto::rustls::QuicClientConfig;
use rustls::pki_types::CertificateDer; use rustls::pki_types::{CertificateDer, ServerName, UnixTime};
use rustls::{ClientConfig as RustlsClientConfig, RootCertStore}; use rustls::{ClientConfig as RustlsClientConfig, RootCertStore};
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt}; use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem}; use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
@@ -13,34 +13,101 @@ use capnp_rpc::{rpc_twoparty_capnp::Side, twoparty, RpcSystem};
use quicproquo_core::HybridPublicKey; use quicproquo_core::HybridPublicKey;
use quicproquo_proto::node_capnp::{auth, node_service}; use quicproquo_proto::node_capnp::{auth, node_service};
use crate::AUTH_CONTEXT; use crate::{AUTH_CONTEXT, INSECURE_SKIP_VERIFY};
use super::retry::{anyhow_is_retriable, retry_async, DEFAULT_BASE_DELAY_MS, DEFAULT_MAX_RETRIES}; use super::retry::{anyhow_is_retriable, retry_async, DEFAULT_BASE_DELAY_MS, DEFAULT_MAX_RETRIES};
/// Cap'n Proto traversal limit (words). 4 Mi words = 32 MiB; bounds DoS from deeply nested or large messages. /// Cap'n Proto traversal limit (words). 4 Mi words = 32 MiB; bounds DoS from deeply nested or large messages.
const CAPNP_TRAVERSAL_LIMIT_WORDS: usize = 4 * 1024 * 1024; const CAPNP_TRAVERSAL_LIMIT_WORDS: usize = 4 * 1024 * 1024;
/// A [`rustls::client::danger::ServerCertVerifier`] that accepts any certificate.
///
/// **Development only.** Using this in production disables all TLS guarantees.
#[derive(Debug)]
struct InsecureServerCertVerifier;

impl rustls::client::danger::ServerCertVerifier for InsecureServerCertVerifier {
    /// Unconditionally accepts the presented certificate chain: no signature,
    /// hostname, expiry, OCSP, or revocation checks are performed.
    fn verify_server_cert(
        &self,
        _end_entity: &CertificateDer<'_>,
        _intermediates: &[CertificateDer<'_>],
        _server_name: &ServerName<'_>,
        _ocsp_response: &[u8],
        _now: UnixTime,
    ) -> Result<rustls::client::danger::ServerCertVerified, rustls::Error> {
        Ok(rustls::client::danger::ServerCertVerified::assertion())
    }

    /// Accepts any TLS 1.2 handshake signature without verifying it.
    fn verify_tls12_signature(
        &self,
        _message: &[u8],
        _cert: &CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }

    /// Accepts any TLS 1.3 handshake signature without verifying it.
    fn verify_tls13_signature(
        &self,
        _message: &[u8],
        _cert: &CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }

    /// Advertise the ring provider's full set of signature schemes so the
    /// handshake never fails on scheme negotiation.
    fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
        rustls::crypto::ring::default_provider()
            .signature_verification_algorithms
            .supported_schemes()
    }
}
/// Establish a QUIC/TLS connection and return a `NodeService` client. /// Establish a QUIC/TLS connection and return a `NodeService` client.
/// ///
/// Must be called from within a `LocalSet` because capnp-rpc is `!Send`. /// Must be called from within a `LocalSet` because capnp-rpc is `!Send`.
///
/// Reads [`INSECURE_SKIP_VERIFY`] to decide whether to bypass certificate
/// verification (set once at startup via [`crate::set_insecure_skip_verify`]).
pub async fn connect_node( pub async fn connect_node(
server: &str, server: &str,
ca_cert: &Path, ca_cert: &Path,
server_name: &str, server_name: &str,
) -> anyhow::Result<node_service::Client> {
let skip = INSECURE_SKIP_VERIFY.load(std::sync::atomic::Ordering::Relaxed);
connect_node_opt(server, ca_cert, server_name, skip).await
}
/// Like [`connect_node`] but with an explicit `insecure_skip_verify` toggle.
///
/// When `insecure_skip_verify` is `true`, certificate verification is disabled entirely.
/// This is intended for development and testing only.
pub async fn connect_node_opt(
server: &str,
ca_cert: &Path,
server_name: &str,
insecure_skip_verify: bool,
) -> anyhow::Result<node_service::Client> { ) -> anyhow::Result<node_service::Client> {
let addr: SocketAddr = server let addr: SocketAddr = server
.parse() .parse()
.with_context(|| format!("server must be host:port, got {server}"))?; .with_context(|| format!("server must be host:port, got {server}"))?;
let cert_bytes = std::fs::read(ca_cert).with_context(|| format!("read ca_cert {ca_cert:?}"))?; let mut tls = if insecure_skip_verify {
let mut roots = RootCertStore::empty(); RustlsClientConfig::builder()
roots .dangerous()
.add(CertificateDer::from(cert_bytes)) .with_custom_certificate_verifier(Arc::new(InsecureServerCertVerifier))
.context("add root cert")?; .with_no_client_auth()
} else {
let mut tls = RustlsClientConfig::builder() let cert_bytes =
.with_root_certificates(roots) std::fs::read(ca_cert).with_context(|| format!("read ca_cert {ca_cert:?}"))?;
.with_no_client_auth(); let mut roots = RootCertStore::empty();
roots
.add(CertificateDer::from(cert_bytes))
.context("add root cert")?;
RustlsClientConfig::builder()
.with_root_certificates(roots)
.with_no_client_auth()
};
tls.alpn_protocols = vec![b"capnp".to_vec()]; tls.alpn_protocols = vec![b"capnp".to_vec()];
let crypto = QuicClientConfig::try_from(tls) let crypto = QuicClientConfig::try_from(tls)
@@ -76,7 +143,9 @@ pub async fn connect_node(
} }
pub fn set_auth(auth: &mut auth::Builder<'_>) -> anyhow::Result<()> { pub fn set_auth(auth: &mut auth::Builder<'_>) -> anyhow::Result<()> {
let guard = AUTH_CONTEXT.read().expect("AUTH_CONTEXT poisoned"); let guard = AUTH_CONTEXT
.read()
.map_err(|e| anyhow::anyhow!("AUTH_CONTEXT lock poisoned: {e}"))?;
let ctx = guard.as_ref().ok_or_else(|| { let ctx = guard.as_ref().ok_or_else(|| {
anyhow::anyhow!( anyhow::anyhow!(
"init_auth must be called before RPCs (use a bearer or session token for authenticated commands)" "init_auth must be called before RPCs (use a bearer or session token for authenticated commands)"
@@ -257,7 +326,6 @@ pub async fn fetch_wait(
|| { || {
let client = client.clone(); let client = client.clone();
let recipient_key = recipient_key.clone(); let recipient_key = recipient_key.clone();
let timeout_ms = timeout_ms;
async move { async move {
let mut req = client.fetch_wait_request(); let mut req = client.fetch_wait_request();
{ {

View File

@@ -51,7 +51,7 @@ impl SessionState {
let hybrid_kp = state let hybrid_kp = state
.hybrid_key .hybrid_key
.as_ref() .as_ref()
.map(|b| HybridKeypair::from_bytes(b)) .map(HybridKeypair::from_bytes)
.transpose() .transpose()
.context("decode hybrid key")?; .context("decode hybrid key")?;
@@ -109,7 +109,7 @@ impl SessionState {
// Use the first 16 bytes of the group_id as the ConversationId. // Use the first 16 bytes of the group_id as the ConversationId.
let conv_id = if group_id_bytes.len() >= 16 { let conv_id = if group_id_bytes.len() >= 16 {
ConversationId::from_slice(&group_id_bytes[..16]) ConversationId::from_slice(&group_id_bytes[..16])
.unwrap_or_else(|| ConversationId([0; 16])) .unwrap_or(ConversationId([0; 16]))
} else { } else {
ConversationId::from_group_name(&hex::encode(&group_id_bytes)) ConversationId::from_group_name(&hex::encode(&group_id_bytes))
}; };
@@ -188,7 +188,7 @@ impl SessionState {
let member = self.members.get(conv_id).context("no such conversation")?; let member = self.members.get(conv_id).context("no such conversation")?;
let blob = member let blob = member
.group_ref() .group_ref()
.map(|g| bincode::serialize(g)) .map(bincode::serialize)
.transpose() .transpose()
.context("serialize MLS group")?; .context("serialize MLS group")?;

View File

@@ -15,6 +15,7 @@
//! docs for details. //! docs for details.
use std::sync::RwLock; use std::sync::RwLock;
use std::sync::atomic::{AtomicBool, Ordering};
pub mod client; pub mod client;
@@ -26,11 +27,23 @@ pub use client::commands::{
}; };
pub use client::repl::run_repl; pub use client::repl::run_repl;
pub use client::rpc::{connect_node, create_channel, enqueue, fetch_wait, resolve_user}; pub use client::rpc::{connect_node, connect_node_opt, create_channel, enqueue, fetch_wait, resolve_user};
// Global auth context — RwLock so the REPL can set it after OPAQUE login. // Global auth context — RwLock so the REPL can set it after OPAQUE login.
pub(crate) static AUTH_CONTEXT: RwLock<Option<ClientAuth>> = RwLock::new(None); pub(crate) static AUTH_CONTEXT: RwLock<Option<ClientAuth>> = RwLock::new(None);
/// When `true`, [`connect_node`] skips TLS certificate verification.
/// Set via [`set_insecure_skip_verify`]; read by the RPC layer.
pub(crate) static INSECURE_SKIP_VERIFY: AtomicBool = AtomicBool::new(false);
/// Enable or disable insecure (no-verify) TLS mode globally.
///
/// **Development only.** When enabled, all outgoing connections skip certificate
/// verification, making them vulnerable to MITM attacks.
pub fn set_insecure_skip_verify(enabled: bool) {
INSECURE_SKIP_VERIFY.store(enabled, Ordering::Relaxed);
}
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct ClientAuth { pub struct ClientAuth {
pub(crate) version: u16, pub(crate) version: u16,

View File

@@ -1,6 +1,6 @@
//! quicproquo CLI client. //! quicproquo CLI client.
use std::path::PathBuf; use std::path::{Path, PathBuf};
use anyhow::Context; use anyhow::Context;
use clap::{Parser, Subcommand}; use clap::{Parser, Subcommand};
@@ -9,7 +9,7 @@ use quicproquo_client::{
cmd_chat, cmd_check_key, cmd_create_group, cmd_demo_group, cmd_export, cmd_export_verify, cmd_chat, cmd_check_key, cmd_create_group, cmd_demo_group, cmd_export, cmd_export_verify,
cmd_fetch_key, cmd_health, cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_recv, cmd_register, cmd_fetch_key, cmd_health, cmd_invite, cmd_join, cmd_login, cmd_ping, cmd_recv, cmd_register,
cmd_register_state, cmd_refresh_keypackage, cmd_register_user, cmd_send, cmd_whoami, cmd_register_state, cmd_refresh_keypackage, cmd_register_user, cmd_send, cmd_whoami,
init_auth, run_repl, ClientAuth, init_auth, run_repl, set_insecure_skip_verify, ClientAuth,
}; };
#[cfg(feature = "tui")] #[cfg(feature = "tui")]
use quicproquo_client::client::tui::run_tui; use quicproquo_client::client::tui::run_tui;
@@ -56,6 +56,15 @@ struct Args {
#[arg(long, global = true, env = "QPQ_STATE_PASSWORD")] #[arg(long, global = true, env = "QPQ_STATE_PASSWORD")]
state_password: Option<String>, state_password: Option<String>,
/// DANGER: Skip TLS certificate verification. Development only.
/// Disables all certificate checks, making the connection vulnerable to MITM attacks.
#[arg(
long = "danger-accept-invalid-certs",
global = true,
env = "QPQ_DANGER_ACCEPT_INVALID_CERTS"
)]
danger_accept_invalid_certs: bool,
// ── Default-repl args (used when no subcommand is given) ───────── // ── Default-repl args (used when no subcommand is given) ─────────
/// State file path (identity + MLS state). Used when running the default REPL. /// State file path (identity + MLS state). Used when running the default REPL.
#[arg(long, default_value = "qpq-state.bin", env = "QPQ_STATE")] #[arg(long, default_value = "qpq-state.bin", env = "QPQ_STATE")]
@@ -393,7 +402,7 @@ enum Command {
/// `state` unchanged. This lets `qpq --username alice` automatically isolate /// `state` unchanged. This lets `qpq --username alice` automatically isolate
/// Alice's state without requiring a manual `--state` flag. /// Alice's state without requiring a manual `--state` flag.
fn derive_state_path(state: PathBuf, username: Option<&str>) -> PathBuf { fn derive_state_path(state: PathBuf, username: Option<&str>) -> PathBuf {
if state == PathBuf::from("qpq-state.bin") { if state == Path::new("qpq-state.bin") {
if let Some(uname) = username { if let Some(uname) = username {
return PathBuf::from(format!("qpq-{uname}.bin")); return PathBuf::from(format!("qpq-{uname}.bin"));
} }
@@ -417,6 +426,11 @@ async fn main() -> anyhow::Result<()> {
let args = Args::parse(); let args = Args::parse();
if args.danger_accept_invalid_certs {
eprintln!("WARNING: TLS verification disabled — insecure mode");
set_insecure_skip_verify(true);
}
// For the REPL and TUI, defer init_auth so they can resolve their own token via OPAQUE. // For the REPL and TUI, defer init_auth so they can resolve their own token via OPAQUE.
// For all other subcommands, initialize auth immediately. // For all other subcommands, initialize auth immediately.
#[cfg(not(feature = "tui"))] #[cfg(not(feature = "tui"))]

View File

@@ -43,6 +43,9 @@ tokio = { workspace = true }
# Error handling # Error handling
thiserror = { workspace = true } thiserror = { workspace = true }
[lints]
workspace = true
[dev-dependencies] [dev-dependencies]
tokio = { workspace = true } tokio = { workspace = true }
criterion = { version = "0.5", features = ["html_reports"] } criterion = { version = "0.5", features = ["html_reports"] }

View File

@@ -145,10 +145,10 @@ pub fn parse(bytes: &[u8]) -> Result<(MessageType, AppMessage), CoreError> {
} }
let version = bytes[0]; let version = bytes[0];
if version != VERSION { if version != VERSION {
return Err(CoreError::AppMessage(format!("unsupported version {version}").into())); return Err(CoreError::AppMessage(format!("unsupported version {version}")));
} }
let msg_type = MessageType::from_byte(bytes[1]) let msg_type = MessageType::from_byte(bytes[1])
.ok_or_else(|| CoreError::AppMessage(format!("unknown message type {}", bytes[1]).into()))?; .ok_or_else(|| CoreError::AppMessage(format!("unknown message type {}", bytes[1])))?;
let payload = &bytes[2..]; let payload = &bytes[2..];
let app = match msg_type { let app = match msg_type {

View File

@@ -29,7 +29,7 @@ fn bucket_for(content_len: usize) -> usize {
} }
} }
// Larger than biggest bucket: round up to nearest 16384-byte multiple. // Larger than biggest bucket: round up to nearest 16384-byte multiple.
((total + 16383) / 16384) * 16384 total.div_ceil(16384) * 16384
} }
/// Pad a payload to the next bucket boundary with cryptographic random bytes. /// Pad a payload to the next bucket boundary with cryptographic random bytes.

View File

@@ -9,5 +9,8 @@ license = "MIT"
name = "qpq-gen" name = "qpq-gen"
path = "src/main.rs" path = "src/main.rs"
[lints]
workspace = true
[dependencies] [dependencies]
clap = { workspace = true } clap = { workspace = true }

View File

@@ -5,6 +5,9 @@ edition = "2021"
description = "Key Transparency: append-only SHA-256 Merkle log for (username, identity_key) bindings." description = "Key Transparency: append-only SHA-256 Merkle log for (username, identity_key) bindings."
license = "MIT" license = "MIT"
[lints]
workspace = true
[dependencies] [dependencies]
sha2 = { workspace = true } sha2 = { workspace = true }
thiserror = { workspace = true } thiserror = { workspace = true }

View File

@@ -5,6 +5,9 @@ edition = "2021"
description = "P2P transport layer for quicproquo using iroh." description = "P2P transport layer for quicproquo using iroh."
license = "MIT" license = "MIT"
[lints]
workspace = true
[dependencies] [dependencies]
iroh = "0.96" iroh = "0.96"
tokio = { version = "1", features = ["macros", "rt-multi-thread", "time", "sync"] } tokio = { version = "1", features = ["macros", "rt-multi-thread", "time", "sync"] }

View File

@@ -5,5 +5,8 @@ edition = "2021"
description = "C-ABI vtable for quicproquo server plugins. No std dependency; usable from bare-metal plugin authors." description = "C-ABI vtable for quicproquo server plugins. No std dependency; usable from bare-metal plugin authors."
license = "MIT" license = "MIT"
[lints]
workspace = true
# No dependencies — intentionally minimal so plugin authors have zero forced transitive deps. # No dependencies — intentionally minimal so plugin authors have zero forced transitive deps.
[dependencies] [dependencies]

View File

@@ -186,5 +186,7 @@ pub struct HookVTable {
// responsible for its own thread safety. The server only calls hook functions // responsible for its own thread safety. The server only calls hook functions
// one at a time per plugin (wrapped in a single Arc). Plugins that mutate // one at a time per plugin (wrapped in a single Arc). Plugins that mutate
// user_data through callbacks must use interior mutability. // user_data through callbacks must use interior mutability.
#[allow(unsafe_code)]
unsafe impl Send for HookVTable {} unsafe impl Send for HookVTable {}
#[allow(unsafe_code)]
unsafe impl Sync for HookVTable {} unsafe impl Sync for HookVTable {}

View File

@@ -11,5 +11,12 @@ build = "build.rs"
[dependencies] [dependencies]
capnp = { workspace = true } capnp = { workspace = true }
[lints.rust]
unsafe_code = "warn"
[lints.clippy]
# Generated Cap'n Proto code uses patterns that trigger clippy lints.
unwrap_used = "allow"
[build-dependencies] [build-dependencies]
capnpc = { workspace = true } capnpc = { workspace = true }

View File

@@ -64,5 +64,8 @@ metrics-exporter-prometheus = "0.15"
# mDNS service announcement for local mesh / Freifunk node discovery. # mDNS service announcement for local mesh / Freifunk node discovery.
mdns-sd = "0.12" mdns-sd = "0.12"
[lints]
workspace = true
[dev-dependencies] [dev-dependencies]
tempfile = "3" tempfile = "3"

View File

@@ -178,7 +178,7 @@ pub fn validate_auth_context(
Err(crate::error_codes::coded_error(E003_INVALID_TOKEN, "invalid accessToken")) Err(crate::error_codes::coded_error(E003_INVALID_TOKEN, "invalid accessToken"))
} }
pub fn require_identity<'a>(auth_ctx: &'a AuthContext) -> Result<&'a [u8], capnp::Error> { pub fn require_identity(auth_ctx: &AuthContext) -> Result<&[u8], capnp::Error> {
match auth_ctx.identity_key.as_deref() { match auth_ctx.identity_key.as_deref() {
Some(ik) => Ok(ik), Some(ik) => Ok(ik),
None => Err(crate::error_codes::coded_error( None => Err(crate::error_codes::coded_error(

View File

@@ -121,7 +121,7 @@ pub fn merge_config(args: &crate::Args, file: &FileConfig) -> EffectiveConfig {
args.data_dir.clone() args.data_dir.clone()
}; };
let tls_cert = if args.tls_cert == PathBuf::from(DEFAULT_TLS_CERT) { let tls_cert = if args.tls_cert == Path::new(DEFAULT_TLS_CERT) {
file.tls_cert file.tls_cert
.clone() .clone()
.unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_CERT)) .unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_CERT))
@@ -129,7 +129,7 @@ pub fn merge_config(args: &crate::Args, file: &FileConfig) -> EffectiveConfig {
args.tls_cert.clone() args.tls_cert.clone()
}; };
let tls_key = if args.tls_key == PathBuf::from(DEFAULT_TLS_KEY) { let tls_key = if args.tls_key == Path::new(DEFAULT_TLS_KEY) {
file.tls_key file.tls_key
.clone() .clone()
.unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_KEY)) .unwrap_or_else(|| PathBuf::from(DEFAULT_TLS_KEY))
@@ -159,7 +159,7 @@ pub fn merge_config(args: &crate::Args, file: &FileConfig) -> EffectiveConfig {
args.store_backend.clone() args.store_backend.clone()
}; };
let db_path = if args.db_path == PathBuf::from(DEFAULT_DB_PATH) { let db_path = if args.db_path == Path::new(DEFAULT_DB_PATH) {
file.db_path file.db_path
.clone() .clone()
.unwrap_or_else(|| PathBuf::from(DEFAULT_DB_PATH)) .unwrap_or_else(|| PathBuf::from(DEFAULT_DB_PATH))

View File

@@ -22,6 +22,7 @@ mod federation;
pub mod hooks; pub mod hooks;
mod metrics; mod metrics;
mod node_service; mod node_service;
#[allow(unsafe_code)] // FFI: C-ABI plugin interaction requires unsafe blocks
mod plugin_loader; mod plugin_loader;
mod sql_store; mod sql_store;
mod tls; mod tls;
@@ -213,7 +214,7 @@ async fn main() -> anyhow::Result<()> {
} }
Arc::new(SqlStore::open(&effective.db_path, &effective.db_key)?) Arc::new(SqlStore::open(&effective.db_path, &effective.db_key)?)
} }
"file" | _ => { _ => {
tracing::info!(dir = %effective.data_dir, "opening file-backed store"); tracing::info!(dir = %effective.data_dir, "opening file-backed store");
Arc::new(FileBackedStore::open(&effective.data_dir)?) Arc::new(FileBackedStore::open(&effective.data_dir)?)
} }

View File

@@ -53,7 +53,7 @@ impl NodeServiceImpl {
)); ));
} }
let (channel_id, was_new) = match self.store.create_channel(&identity, &peer_key) { let (channel_id, was_new) = match self.store.create_channel(identity, &peer_key) {
Ok(pair) => pair, Ok(pair) => pair,
Err(e) => return Promise::err(storage_err(e)), Err(e) => return Promise::err(storage_err(e)),
}; };

View File

@@ -12,7 +12,7 @@ fn storage_err(err: StorageError) -> capnp::Error {
coded_error(E009_STORAGE_ERROR, err) coded_error(E009_STORAGE_ERROR, err)
} }
const MAX_KEYPACKAGE_BYTES: usize = 1 * 1024 * 1024; // 1 MB cap per KeyPackage const MAX_KEYPACKAGE_BYTES: usize = 1024 * 1024; // 1 MB cap per KeyPackage
impl NodeServiceImpl { impl NodeServiceImpl {
pub fn handle_upload_key_package( pub fn handle_upload_key_package(

View File

@@ -221,6 +221,7 @@ pub struct NodeServiceImpl {
} }
impl NodeServiceImpl { impl NodeServiceImpl {
#[allow(clippy::too_many_arguments)]
pub fn new( pub fn new(
store: Arc<dyn Store>, store: Arc<dyn Store>,
waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>, waiters: Arc<DashMap<Vec<u8>, Arc<Notify>>>,
@@ -254,6 +255,7 @@ impl NodeServiceImpl {
} }
} }
#[allow(clippy::too_many_arguments)]
pub async fn handle_node_connection( pub async fn handle_node_connection(
connecting: quinn::Connecting, connecting: quinn::Connecting,
store: Arc<dyn Store>, store: Arc<dyn Store>,

View File

@@ -147,6 +147,7 @@ pub trait Store: Send + Sync {
fn create_channel(&self, member_a: &[u8], member_b: &[u8]) -> Result<(Vec<u8>, bool), StorageError>; fn create_channel(&self, member_a: &[u8], member_b: &[u8]) -> Result<(Vec<u8>, bool), StorageError>;
/// Get the two members of a channel by channel_id (16 bytes). Returns (member_a, member_b) in sorted order. /// Get the two members of a channel by channel_id (16 bytes). Returns (member_a, member_b) in sorted order.
#[allow(clippy::type_complexity)]
fn get_channel_members(&self, channel_id: &[u8]) -> Result<Option<(Vec<u8>, Vec<u8>)>, StorageError>; fn get_channel_members(&self, channel_id: &[u8]) -> Result<Option<(Vec<u8>, Vec<u8>)>, StorageError>;
// ── Federation ────────────────────────────────────────────────────────── // ── Federation ──────────────────────────────────────────────────────────
@@ -232,6 +233,7 @@ pub struct FileBackedStore {
channels_path: PathBuf, channels_path: PathBuf,
key_packages: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>, key_packages: Mutex<HashMap<Vec<u8>, VecDeque<Vec<u8>>>>,
deliveries: Mutex<QueueMapV3>, deliveries: Mutex<QueueMapV3>,
#[allow(clippy::type_complexity)]
channels: Mutex<HashMap<Vec<u8>, (Vec<u8>, Vec<u8>)>>, channels: Mutex<HashMap<Vec<u8>, (Vec<u8>, Vec<u8>)>>,
hybrid_keys: Mutex<HashMap<Vec<u8>, Vec<u8>>>, hybrid_keys: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
users: Mutex<HashMap<String, Vec<u8>>>, users: Mutex<HashMap<String, Vec<u8>>>,
@@ -282,6 +284,7 @@ impl FileBackedStore {
}) })
} }
#[allow(clippy::type_complexity)]
fn load_channels( fn load_channels(
path: &Path, path: &Path,
) -> Result<HashMap<Vec<u8>, (Vec<u8>, Vec<u8>)>, StorageError> { ) -> Result<HashMap<Vec<u8>, (Vec<u8>, Vec<u8>)>, StorageError> {
@@ -435,13 +438,13 @@ impl Store for FileBackedStore {
map.entry(identity_key.to_vec()) map.entry(identity_key.to_vec())
.or_default() .or_default()
.push_back(package); .push_back(package);
self.flush_kp_map(&self.kp_path, &*map) self.flush_kp_map(&self.kp_path, &map)
} }
fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> { fn fetch_key_package(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
let mut map = lock(&self.key_packages)?; let mut map = lock(&self.key_packages)?;
let package = map.get_mut(identity_key).and_then(|q| q.pop_front()); let package = map.get_mut(identity_key).and_then(|q| q.pop_front());
self.flush_kp_map(&self.kp_path, &*map)?; self.flush_kp_map(&self.kp_path, &map)?;
Ok(package) Ok(package)
} }
@@ -460,7 +463,7 @@ impl Store for FileBackedStore {
let seq = *entry; let seq = *entry;
*entry = seq + 1; *entry = seq + 1;
inner.map.entry(key).or_default().push_back(SeqEntry { seq, data: payload }); inner.map.entry(key).or_default().push_back(SeqEntry { seq, data: payload });
self.flush_delivery_map(&self.ds_path, &*inner)?; self.flush_delivery_map(&self.ds_path, &inner)?;
Ok(seq) Ok(seq)
} }
@@ -479,7 +482,7 @@ impl Store for FileBackedStore {
.get_mut(&key) .get_mut(&key)
.map(|q| q.drain(..).map(|e| (e.seq, e.data)).collect()) .map(|q| q.drain(..).map(|e| (e.seq, e.data)).collect())
.unwrap_or_default(); .unwrap_or_default();
self.flush_delivery_map(&self.ds_path, &*inner)?; self.flush_delivery_map(&self.ds_path, &inner)?;
Ok(messages) Ok(messages)
} }
@@ -502,7 +505,7 @@ impl Store for FileBackedStore {
q.drain(..count).map(|e| (e.seq, e.data)).collect() q.drain(..count).map(|e| (e.seq, e.data)).collect()
}) })
.unwrap_or_default(); .unwrap_or_default();
self.flush_delivery_map(&self.ds_path, &*inner)?; self.flush_delivery_map(&self.ds_path, &inner)?;
Ok(messages) Ok(messages)
} }
@@ -527,7 +530,7 @@ impl Store for FileBackedStore {
) -> Result<(), StorageError> { ) -> Result<(), StorageError> {
let mut map = lock(&self.hybrid_keys)?; let mut map = lock(&self.hybrid_keys)?;
map.insert(identity_key.to_vec(), hybrid_pk); map.insert(identity_key.to_vec(), hybrid_pk);
self.flush_hybrid_keys(&self.hk_path, &*map) self.flush_hybrid_keys(&self.hk_path, &map)
} }
fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> { fn fetch_hybrid_key(&self, identity_key: &[u8]) -> Result<Option<Vec<u8>>, StorageError> {
@@ -615,7 +618,7 @@ impl Store for FileBackedStore {
v.insert(record); v.insert(record);
} }
} }
self.flush_users(&self.users_path, &*map) self.flush_users(&self.users_path, &map)
} }
fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> { fn get_user_record(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
@@ -635,7 +638,7 @@ impl Store for FileBackedStore {
) -> Result<(), StorageError> { ) -> Result<(), StorageError> {
let mut map = lock(&self.identity_keys)?; let mut map = lock(&self.identity_keys)?;
map.insert(username.to_string(), identity_key); map.insert(username.to_string(), identity_key);
self.flush_map_string_bytes(&self.identity_keys_path, &*map) self.flush_map_string_bytes(&self.identity_keys_path, &map)
} }
fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> { fn get_user_identity_key(&self, username: &str) -> Result<Option<Vec<u8>>, StorageError> {
@@ -697,7 +700,7 @@ impl Store for FileBackedStore {
} else { } else {
0 0
}; };
self.flush_delivery_map(&self.ds_path, &*inner)?; self.flush_delivery_map(&self.ds_path, &inner)?;
Ok(removed) Ok(removed)
} }
@@ -730,7 +733,7 @@ impl Store for FileBackedStore {
rand::thread_rng().fill_bytes(&mut channel_id); rand::thread_rng().fill_bytes(&mut channel_id);
let channel_id = channel_id.to_vec(); let channel_id = channel_id.to_vec();
map.insert(channel_id.clone(), (a, b)); map.insert(channel_id.clone(), (a, b));
self.flush_channels(&self.channels_path, &*map)?; self.flush_channels(&self.channels_path, &map)?;
Ok((channel_id, true)) Ok((channel_id, true))
} }

View File

@@ -12,11 +12,17 @@ WORKDIR /build
# Copy manifests first so dependency layers are cached independently of source. # Copy manifests first so dependency layers are cached independently of source.
COPY Cargo.toml Cargo.lock ./ COPY Cargo.toml Cargo.lock ./
COPY crates/quicproquo-core/Cargo.toml crates/quicproquo-core/Cargo.toml COPY crates/quicproquo-core/Cargo.toml crates/quicproquo-core/Cargo.toml
COPY crates/quicproquo-proto/Cargo.toml crates/quicproquo-proto/Cargo.toml COPY crates/quicproquo-proto/Cargo.toml crates/quicproquo-proto/Cargo.toml
COPY crates/quicproquo-server/Cargo.toml crates/quicproquo-server/Cargo.toml COPY crates/quicproquo-server/Cargo.toml crates/quicproquo-server/Cargo.toml
COPY crates/quicproquo-client/Cargo.toml crates/quicproquo-client/Cargo.toml COPY crates/quicproquo-client/Cargo.toml crates/quicproquo-client/Cargo.toml
COPY crates/quicproquo-p2p/Cargo.toml crates/quicproquo-p2p/Cargo.toml COPY crates/quicproquo-p2p/Cargo.toml crates/quicproquo-p2p/Cargo.toml
COPY crates/quicproquo-bot/Cargo.toml crates/quicproquo-bot/Cargo.toml
COPY crates/quicproquo-gen/Cargo.toml crates/quicproquo-gen/Cargo.toml
COPY crates/quicproquo-kt/Cargo.toml crates/quicproquo-kt/Cargo.toml
COPY crates/quicproquo-plugin-api/Cargo.toml crates/quicproquo-plugin-api/Cargo.toml
COPY crates/quicproquo-gui/Cargo.toml crates/quicproquo-gui/Cargo.toml
COPY crates/quicproquo-mobile/Cargo.toml crates/quicproquo-mobile/Cargo.toml
# Create dummy source files so `cargo build` can resolve the dependency graph # Create dummy source files so `cargo build` can resolve the dependency graph
# and cache the compiled dependencies before copying real source. # and cache the compiled dependencies before copying real source.
@@ -26,11 +32,23 @@ RUN mkdir -p \
crates/quicproquo-server/src \ crates/quicproquo-server/src \
crates/quicproquo-client/src \ crates/quicproquo-client/src \
crates/quicproquo-p2p/src \ crates/quicproquo-p2p/src \
crates/quicproquo-bot/src \
crates/quicproquo-gen/src \
crates/quicproquo-kt/src \
crates/quicproquo-plugin-api/src \
crates/quicproquo-gui/src \
crates/quicproquo-mobile/src \
&& echo 'fn main() {}' > crates/quicproquo-server/src/main.rs \ && echo 'fn main() {}' > crates/quicproquo-server/src/main.rs \
&& echo 'fn main() {}' > crates/quicproquo-client/src/main.rs \ && echo 'fn main() {}' > crates/quicproquo-client/src/main.rs \
&& echo 'fn main() {}' > crates/quicproquo-gen/src/main.rs \
&& echo 'fn main() {}' > crates/quicproquo-bot/src/main.rs \
&& touch crates/quicproquo-core/src/lib.rs \ && touch crates/quicproquo-core/src/lib.rs \
&& touch crates/quicproquo-proto/src/lib.rs \ && touch crates/quicproquo-proto/src/lib.rs \
&& touch crates/quicproquo-p2p/src/lib.rs && touch crates/quicproquo-p2p/src/lib.rs \
&& touch crates/quicproquo-kt/src/lib.rs \
&& touch crates/quicproquo-plugin-api/src/lib.rs \
&& touch crates/quicproquo-gui/src/lib.rs \
&& touch crates/quicproquo-mobile/src/lib.rs
# Schemas must exist before the proto crate's build.rs runs. # Schemas must exist before the proto crate's build.rs runs.
COPY schemas/ schemas/ COPY schemas/ schemas/
@@ -46,6 +64,8 @@ RUN touch \
crates/quicproquo-core/src/lib.rs \ crates/quicproquo-core/src/lib.rs \
crates/quicproquo-proto/src/lib.rs \ crates/quicproquo-proto/src/lib.rs \
crates/quicproquo-p2p/src/lib.rs \ crates/quicproquo-p2p/src/lib.rs \
crates/quicproquo-kt/src/lib.rs \
crates/quicproquo-plugin-api/src/lib.rs \
crates/quicproquo-server/src/main.rs \ crates/quicproquo-server/src/main.rs \
crates/quicproquo-client/src/main.rs crates/quicproquo-client/src/main.rs

View File

@@ -17,6 +17,7 @@
- [Building from Source](getting-started/building.md) - [Building from Source](getting-started/building.md)
- [Running the Server](getting-started/running-the-server.md) - [Running the Server](getting-started/running-the-server.md)
- [Running the Client](getting-started/running-the-client.md) - [Running the Client](getting-started/running-the-client.md)
- [TLS in quicproquo](getting-started/tls.md)
- [Certificate Lifecycle and CA-Signed TLS](getting-started/certificate-lifecycle.md) - [Certificate Lifecycle and CA-Signed TLS](getting-started/certificate-lifecycle.md)
- [Docker Deployment](getting-started/docker.md) - [Docker Deployment](getting-started/docker.md)
- [Bot SDK](getting-started/bot-sdk.md) - [Bot SDK](getting-started/bot-sdk.md)

View File

@@ -0,0 +1,100 @@
# TLS in quicproquo
quicproquo uses QUIC (RFC 9000) for all client-server communication. QUIC mandates TLS 1.3, so every connection is encrypted and the server is authenticated at the transport layer — there is no plaintext mode.
## How it works
The server holds a TLS certificate and private key (DER format). On startup it either loads existing files or, in development mode, generates a self-signed certificate automatically. The client authenticates the server by verifying its certificate against a trusted root provided via `--ca-cert` (or `QPQ_CA_CERT`).
The TLS handshake negotiates the ALPN protocol `capnp`, after which the QUIC bi-directional stream carries Cap'n Proto RPC traffic.
## Certificate pinning with `--ca-cert`
By default the client trusts only the certificate (or CA certificate) contained in the file given by `--ca-cert`:
```bash
qpq --ca-cert data/server-cert.der --server-name localhost health --server 127.0.0.1:7000
```
This is a form of **certificate pinning**: the client will only connect to a server whose certificate chains to the provided root. For single-server deployments, pass the server's own self-signed certificate. For CA-issued certificates, pass the CA's root certificate instead.
| Flag / Env var | Purpose |
|---|---|
| `--ca-cert` / `QPQ_CA_CERT` | Path to trusted root certificate (DER) |
| `--server-name` / `QPQ_SERVER_NAME` | Expected TLS server name (must match certificate SAN) |
## The `--danger-accept-invalid-certs` flag
For local development and testing you can skip certificate verification entirely:
```bash
qpq --danger-accept-invalid-certs health --server 127.0.0.1:7000
```
Or via the environment:
```bash
export QPQ_DANGER_ACCEPT_INVALID_CERTS=true
qpq health --server 127.0.0.1:7000
```
When active, the client prints a warning to stderr:
```
WARNING: TLS verification disabled — insecure mode
```
**Never use this flag in production.** It disables all certificate checks, making the connection vulnerable to man-in-the-middle attacks. It exists solely so that developers can iterate without managing certificates during local testing.
## Generating self-signed certificates for development
### Using rcgen (Rust)
The server generates a self-signed certificate automatically when the cert/key files are missing (unless `QPQ_PRODUCTION=1` is set). The generated files are written to:
- `data/server-cert.der` — DER-encoded certificate
- `data/server-key.der` — DER-encoded PKCS#8 private key
### Using openssl
To generate a self-signed certificate manually:
```bash
# Generate a private key and self-signed certificate (PEM)
openssl req -x509 -newkey ec -pkeyopt ec_paramgen_curve:prime256v1 \
-keyout key.pem -out cert.pem -days 365 -nodes \
-subj "/CN=localhost" \
-addext "subjectAltName=DNS:localhost,IP:127.0.0.1"
# Convert to DER format (required by quicproquo)
openssl x509 -in cert.pem -outform DER -out data/server-cert.der
openssl pkcs8 -topk8 -inform PEM -outform DER -in key.pem -out data/server-key.der -nocrypt
```
Point the server at the DER files:
```bash
export QPQ_TLS_CERT=data/server-cert.der
export QPQ_TLS_KEY=data/server-key.der
cargo run -p quicproquo-server
```
And the client at the certificate:
```bash
qpq --ca-cert data/server-cert.der --server-name localhost repl
```
## CA-issued certificates
For production deployments with a public CA (e.g. Let's Encrypt):
1. Obtain the certificate and key (e.g. via certbot).
2. Convert to DER format as shown above.
3. Configure the client to trust the CA root rather than the server certificate directly:
```bash
qpq --ca-cert /etc/ssl/certs/isrg-root-x1.der --server-name chat.example.com repl
```
See [Certificate Lifecycle and CA-Signed TLS](certificate-lifecycle.md) for rotation, OCSP, and operational details.